import datetime
import json
import os
import random
import re
import time

import PIL.Image
import keras.models
import numpy as np
from keras_preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split

import my_models
from keras.callbacks import ModelCheckpoint, EarlyStopping
import tensorflow
from matplotlib import pyplot as plt

import mat_util



def load_model_and_fit(ds_x, ds_y, model_best_save_filepath, model, epochs=200, batch_size=1,
                       normalize=False, patience=0.2, aug_save_dir=None, log_path=''):
    """Optionally normalize the dataset, train ``model`` on it, and plot the history.

    Parameters
    ----------
    ds_x, ds_y : np.ndarray
        Input / target arrays; indexing below assumes (samples, height, width)
        — TODO confirm with the .mat producers.
    model_best_save_filepath : str
        Where ModelCheckpoint writes the best (lowest ``val_loss``) model.
    model :
        Compiled Keras model to train (trained in place).
    epochs : int
        Maximum number of training epochs.
    batch_size : int
        Mini-batch size.
    normalize : bool
        When True, min-max scale both arrays to [0, 1] as float16.
    patience : float
        Early-stopping patience expressed as a fraction of ``epochs``,
        capped at 50 epochs.
    aug_save_dir : str or None
        When a non-empty directory is given, train with on-the-fly image
        augmentation and save augmented images there; otherwise train on the
        raw arrays with a 20% validation split.
    log_path : str
        TensorBoard log directory; created when non-empty.
    """
    # BUGFIX: os.mkdir('') raised FileNotFoundError for the default log_path;
    # makedirs also handles nested directories.
    if log_path and not os.path.exists(log_path):
        os.makedirs(log_path, exist_ok=True)

    ds_x_shape = ds_x.shape
    ds_y_shape = ds_y.shape
    print(f'{ ds_x_shape=}')
    print(f'{ ds_y_shape=}')
    if normalize:
        # Min-max normalization to [0, 1]; float16 halves memory use.
        ds_x = np.float16(ds_x)
        ds_y = np.float16(ds_y)
        x_min = np.min(ds_x)
        x_max = np.max(ds_x)
        y_min = np.min(ds_y)
        y_max = np.max(ds_y)
        ds_x = (ds_x - x_min) / (x_max - x_min)
        ds_y = (ds_y - y_min) / (y_max - y_min)
        print('归一化:', ds_x.dtype)
        print(f'{x_max=}')
        print(f'{x_min=}')
        print(f'{y_max=}')
        print(f'{y_min=}')
    else:
        print(f'{np.min(ds_x)=}')
        print(f'{np.max(ds_x)=}')
        print(f'{np.min(ds_y)=}')
        print(f'{np.max(ds_y)=}')

    checkpoint = ModelCheckpoint(filepath=model_best_save_filepath,
                                 monitor='val_loss',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='min')
    # Patience is a fraction of the epoch budget, capped at 50 epochs.
    patience = min(int(epochs * patience), 50)
    early_stop = EarlyStopping(monitor='val_loss', patience=patience, verbose=1, mode='auto')
    callback_list = [checkpoint, early_stop]
    if log_path:
        # BUGFIX: the TensorBoard callback used to be created but never
        # registered, so no logs were ever written.
        callback_list.append(tensorflow.keras.callbacks.TensorBoard(log_dir=log_path))

    # BUGFIX: the original condition (`not aug_save_dir is None or aug_save_dir == ''`)
    # selected the augmentation branch for the empty string; per the comment on
    # the else branch, augmentation is only wanted for a real directory.
    if aug_save_dir:
        # Hold out an explicit test split (augmented flow cannot use validation_split).
        x_train, x_test, y_train, y_test = train_test_split(ds_x, ds_y, test_size=0.2, random_state=0)
        train_shape = x_train.shape
        test_shape = x_test.shape
        print(f'{train_shape=}')
        print(f'{test_shape=}')
        # On-the-fly data augmentation.
        datagen = ImageDataGenerator(
            featurewise_center=True,
            featurewise_std_normalization=True,
            rotation_range=20,
            width_shift_range=0.2,
            height_shift_range=0.2,
            horizontal_flip=True)
        # fit() computes the statistics featurewise normalization needs
        # (std, mean, and principal components if ZCA whitening is applied).
        datagen.fit(x_train.reshape(train_shape[0], train_shape[1], train_shape[2], 1))
        # BUGFIX: validation data is now reshaped to the same 4-D layout as the
        # training flow (it used to be fed 3-D while training data was 4-D).
        history = model.fit(datagen.flow(x_train.reshape(train_shape[0], train_shape[1], train_shape[2], 1),
                                         y_train.reshape(train_shape[0], train_shape[1], train_shape[2], 1),
                                         batch_size=batch_size, save_to_dir=aug_save_dir),
                            validation_data=(x_test.reshape(test_shape[0], test_shape[1], test_shape[2], 1),
                                             y_test.reshape(test_shape[0], test_shape[1], test_shape[2], 1)),
                            verbose=1, epochs=epochs, callbacks=callback_list)
    else:  # training without data augmentation
        history = model.fit(ds_x, ds_y, validation_split=0.2, verbose=1, epochs=epochs,
                            batch_size=batch_size, callbacks=callback_list)

    # Accuracy history (only when the model was compiled with an accuracy metric,
    # otherwise these keys are absent and plotting would raise KeyError).
    if 'accuracy' in history.history:
        plt.plot(history.history['accuracy'])
        plt.plot(history.history['val_accuracy'])
        plt.title('model accuracy : ' + model_best_save_filepath)
        plt.ylabel('accuracy')
        plt.xlabel('epoch')
        plt.legend(['train', 'validation'], loc='upper left')
        plt.show()

    # Loss history.
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('model loss : ' + model_best_save_filepath)
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'validation'], loc='upper left')
    plt.show()


def predict_img(test_path, model, saved_predicted_filepath='', normalize=False):
    """Run ``model`` on the image or .mat file at ``test_path`` and save the result.

    Parameters
    ----------
    test_path : str
        Input file; loaded via mat_util for ``.mat``, otherwise via PIL.
    model :
        Keras model used for prediction; its architecture is also saved as JSON.
    saved_predicted_filepath : str
        Output .mat path; defaults to ``test_path`` with 'test' -> 'predict'.
    normalize : bool
        When True, min-max scale the input to [0, 1] before predicting and
        rescale the prediction to [0, 255] afterwards.
    """
    if test_path.endswith('.mat'):
        test_x = mat_util.load_mat(test_path)
    else:
        test_x = PIL.Image.open(test_path)
    if saved_predicted_filepath is None or saved_predicted_filepath == '':
        saved_predicted_filepath = test_path.replace('test', 'predict')
    if normalize:
        # Min-max scale to [0, 1], mirroring the training-time preprocessing.
        test_x = np.float16(test_x)
        x_min = np.min(test_x)
        x_max = np.max(test_x)
        # BUGFIX: the normalized array used to be stored in a separate variable
        # (ds_x) while the *raw* data was fed to the model below.
        test_x = (test_x - x_min) / (x_max - x_min)
        print('归一化:', test_x.dtype)
        print(f'{x_max=}')
        print(f'{x_min=}')
    else:
        print(f'{np.max(test_x)=}')
        print(f'{np.min(test_x)=}')
    # Predict.
    predictions = model.predict(test_x)
    print(f'{predictions.shape=}')
    predictions = np.squeeze(predictions)
    if normalize:
        # BUGFIX: subtract the minimum before scaling so the output really
        # spans [0, 255] (the original skipped the `- min` shift).
        p_min = predictions.min()
        p_max = predictions.max()
        predictions = (predictions - p_min) / (p_max - p_min) * 255
    print(f'{predictions.shape=}')
    # Save the prediction.
    mat_util.save_mat(saved_predicted_filepath, predictions)
    # Save the model architecture next to the prediction.  to_json() already
    # returns a JSON string, so write it verbatim (json.dump would double-encode
    # it).  The .json name is derived from whatever extension the output path
    # carries (the old replace('.h5', ...) was a no-op for .mat paths), and
    # mode 'w' replaces a stale file instead of appending invalid JSON to it.
    json_model = model.to_json()
    json_file_path = os.path.splitext(saved_predicted_filepath)[0] + '.json'
    with open(json_file_path, mode='w', encoding='utf-8') as json_file:
        json_file.write(json_model)


def is_digital(x):
    """Return True if the string *x* contains at least one decimal digit.

    The original returned ``re.Match | None``; returning a real bool keeps
    truthiness-based callers working while making the contract explicit.
    """
    return re.search(r'\d', x) is not None

# Train on every dataset file under a directory, batch by batch.
def train_mat(train_path, initial_model, model_path='', devides=1, joint=1, epochs=200,
              batch_size=1, normalize=False, patience=0.2, aug_save_path=None, log_path=''):
    """Train (or resume training) a model on all ``x*.mat`` files under ``train_path``.

    Parameters
    ----------
    train_path : str
        Directory containing paired ``x*.mat`` / ``y*.mat`` dataset files.
    initial_model :
        Fresh Keras model used when no checkpoint exists at ``model_path``.
    model_path : str
        Directory or ``*model.h5`` file for the checkpoint; defaults to
        ``train_path``.
    devides : int
        When > 1, split every mat file into this many chunks and train on each
        chunk separately.
    joint : int
        When ``devides`` == 1, concatenate this many mat files before each
        training round.
    epochs, batch_size, normalize, patience, aug_save_path, log_path :
        Forwarded to :func:`load_model_and_fit`.
    """
    # The dataset "type" is the last path component; fall back to '/' when the
    # path contains no backslash.  (BUGFIX: the original compared len(type) to
    # the path string — an int-vs-str comparison that is always False — and the
    # fallback re-split on '\\' anyway.)
    ds_type = train_path.split('\\')[-1]
    if ds_type == train_path:
        ds_type = train_path.split('/')[-1]
    if model_path == '':
        model_path = train_path
    if not os.path.exists(model_path):
        os.mkdir(model_path)
    if not model_path.endswith('model.h5'):
        model_path = os.path.join(model_path, '{}_model.h5'.format(ds_type))
    try:
        print('加载模型:{}'.format(model_path))
        model = keras.models.load_model(model_path)  # incremental learning from checkpoint
        initial_model = None
    except Exception:
        # No (or unreadable) checkpoint: start from the provided fresh model.
        print('初始化模型')
        model = initial_model

    if log_path == '':
        log_path = os.path.join(train_path, 'tf-log')
    files = os.listdir(train_path)
    # Keep only the input files, named like x<number>; y files are derived below.
    files = list(filter(lambda f: re.search(r'x\d+', f), files))
    j = 0
    ds_x = None
    ds_y = None

    if devides > 1:
        # Split each big mat into {devides} chunks and train chunk by chunk.
        for file in files:
            x_path = file
            y_path = x_path.replace("x", "y")
            # BUGFIX: join with train_path (the other branch already did; bare
            # filenames only worked when cwd happened to be train_path).
            ds_x = mat_util.load_mat(os.path.join(train_path, x_path))
            ds_y = mat_util.load_mat(os.path.join(train_path, y_path))
            per_len = int(ds_x.shape[0] / devides)
            for part in range(devides):
                print('开始训练:' + x_path, '模型:' + model_path, "时间:" + datetime.datetime.now().strftime('%F %T'))
                start = part * per_len
                end = (part + 1) * per_len
                # BUGFIX: the keyword is aug_save_dir (aug_save_path raised
                # TypeError here); batch_size is now forwarded as well.
                load_model_and_fit(ds_x[start:end, :, :], ds_y[start:end, :, :],
                                   model_best_save_filepath=model_path, model=model,
                                   epochs=epochs, batch_size=batch_size,
                                   normalize=normalize, patience=patience,
                                   aug_save_dir=aug_save_path, log_path=log_path)
                print('完成训练:' + x_path, '模型:' + model_path, "时间:" + datetime.datetime.now().strftime('%F %T'))
                tensorflow.keras.backend.clear_session()
                time.sleep(300)  # pause between rounds (presumably to free GPU memory — confirm)
    else:
        # Concatenate {joint} mats and train them together.
        for file in files:
            x_path = file
            y_path = x_path.replace("x", "y")
            if ds_x is None:
                ds_x = mat_util.load_mat(os.path.join(train_path, x_path))
                ds_y = mat_util.load_mat(os.path.join(train_path, y_path))
            else:
                ds_x = np.concatenate((ds_x, mat_util.load_mat(os.path.join(train_path, x_path))), axis=0)
                ds_y = np.concatenate((ds_y, mat_util.load_mat(os.path.join(train_path, y_path))), axis=0)
            j += 1
            # Visual sanity check: four random x/y sample pairs on a 2x4 grid.
            for i in range(4):
                # BUGFIX: randint's upper bound is inclusive, so len(ds_x) was a
                # valid draw and could raise IndexError; randrange excludes it.
                a = random.randrange(len(ds_x))
                plt.subplot(241 + i)  # top row: inputs
                plt.imshow(PIL.Image.fromarray(ds_x[a]), cmap='gray')
                plt.title(a)
                plt.axis('off')
                plt.subplot(245 + i)  # bottom row: targets
                plt.imshow(PIL.Image.fromarray(ds_y[a]), cmap='gray')
                plt.title(a)
                plt.axis('off')
            plt.show()
            if j % joint == 0:
                print('开始训练:' + train_path + x_path, '模型:' + model_path, "时间:" + datetime.datetime.now().strftime('%F %T'))
                load_model_and_fit(ds_x, ds_y, model_best_save_filepath=model_path, model=model,
                                   batch_size=batch_size, epochs=epochs, normalize=normalize,
                                   patience=patience, aug_save_dir=aug_save_path, log_path=log_path)
                print('完成训练:' + train_path + x_path, '模型:' + model_path, "时间:" + datetime.datetime.now().strftime('%F %T'))
                # NOTE(review): clear_session() tears down the backend graph while
                # the same `model` object keeps being trained in later rounds —
                # confirm this interaction is intended.
                tensorflow.keras.backend.clear_session()
                ds_x = ds_y = None
                time.sleep(300)

    # Final sanity prediction on the held-out test mat of this dataset type.
    test_path = os.path.join(train_path, '{}_test.mat'.format(ds_type))
    saved_predicted_filepath = test_path.replace('test', 'predict')
    print("开始预测:" + test_path)
    predict_img(test_path, model, saved_predicted_filepath, normalize=normalize)
    print("完成预测,结果保存至:" + saved_predicted_filepath)



if __name__ == '__main__':
    # Optional MegEngine DTR memory optimisation (disabled):
    # import megengine as mge
    # mge.dtr.eviction_threshold = "24GB"  # set GPU-memory eviction threshold (NOTE: value is 24GB, original note said 11GB — verify)
    # mge.dtr.enable()  # enable DTR GPU-memory optimisation

    # Server-side training (disabled; `train_multiple_mat` is not defined in this file — presumably lives elsewhere, confirm)
    # train_path = '\root\autodl-tmp\noise'
    # train_multiple_mat(train_path  , epochs=1000, batch_size=32,normalize = True ,patience = 0.05 ,log_path='\root\tf-logs')
    # tensorflow.keras.backend.clear_session()
    # time.sleep(60)
    # train_path = '\root\autodl-tmp\artifact'
    # train_multiple_mat(train_path  , epochs=1000, batch_size=32,normalize = True ,patience = 0.05, log_path='\root\tf-logs')
    # tensorflow.keras.backend.clear_session()
    # time.sleep(60)
    # train_path = '\root\autodl-tmp\detail_less'
    # train_multiple_mat(train_path  , epochs=1000, batch_size=32,normalize = True , patience = 0.05,log_path='\root\tf-logs')
    # tensorflow.keras.backend.clear_session()
    # time.sleep(60)
    # Shut the machine down once training finishes
    # os.system('shutdown -h now')

    # Local training
    # train_path= r'E:\download\Dataset\keras\train\local\noise'
    # aug_save_dir = os.path.join(train_path , 'aug')
    # train_multiple_mat(train_path ,epochs=500, batch_size=16)
    # train_path = r'E:\download\Dataset\keras\train\local\artifact'
    # aug_save_dir = os.path.join(train_path , 'aug')
    # train_mat(train_path, epochs=500, batch_size=16)
    # NOTE(review): aug_save_dir is computed but never passed to train_mat, so
    # data augmentation is effectively disabled for this run — confirm intent.
    train_path = r'E:\download\Dataset\keras\train\local\detail_less'
    aug_save_dir = os.path.join(train_path , 'aug')
    train_mat(train_path, initial_model=my_models.automap(), epochs=300, batch_size=16)
