# To add a new cell, type ''
# To add a new markdown cell, type ''
# # Import dependencies
# -----------------------------Third-party imports--------------------------

import os
import pandas as pd
import numpy as np
import tensorflow as tf
#import keras
import time
import math
import seaborn as sns
import statsmodels.api as sm    # 0.11.1
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import tensorflow.keras.backend as Kbackend
import random

# pydot_ng 用于绘制网络图
import pydot_ng as pg  # 2.0.0
from mpl_toolkits.axes_grid1 import make_axes_locatable
from sklearn.model_selection import train_test_split
from sklearn import model_selection
from sklearn.preprocessing import scale
from sklearn.metrics import mean_absolute_error
from pandas import set_option
# from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.keras.callbacks import LearningRateScheduler
from tensorflow.keras.callbacks import ReduceLROnPlateau
from pylab import *
from scipy import interpolate
from pygame import mixer

# calculate RMSE
from sklearn.metrics import mean_squared_error
from itertools import cycle
from tqdm import tqdm
# sklearn.metrics.mean_squared_error(y_true, y_pred, sample_weight=None, multioutput='uniform_average', squared=True)
# https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html#sklearn.metrics.mean_squared_error


print(tf.__version__)
# Tested versions: numpy+mkl 17.2
# tensorboard / tensorflow 2.1.0
# pydot 1.4.1, graphviz 0.13.2

# ## Import project-local packages
import senutil as sen
# from rbflayer import RBFLayer, InitCentersRandom
import senmodels as sms
import logging

# Use the SimHei font so Chinese characters render correctly in figures.
mpl.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['font.sans-serif']=['SimHei'] # display Chinese labels correctly
plt.rcParams['axes.unicode_minus']=False # display the minus sign correctly
set_option("display.max_rows", 15)
set_option('display.width', 200)
np.set_printoptions(suppress=True, threshold=5000)


# tf.config.experimental.list_physical_devices()



# When training on GPU, uncomment the lines below as needed.
os.environ['CUDA_VISIBLE_DEVICES'] = "0"  # default GPU id: "0"
# physical_devices = tf.config.experimental.list_physical_devices('GPU')
# physical_devices = tf.config.experimental.list_physical_devices('CPU')
# tf.config.experimental.set_memory_growth(physical_devices[0], True)

# physical_devices


'''
Logging helper.
Approach adapted from: https://blog.csdn.net/qq_36187544/article/details/100893525
'''
def logger_config(log_path, logging_name):
    """Create a logger that writes to both a file and the console.

    The logger object filters first (DEBUG and above); each handler then
    applies its own second-stage filter: the file handler records INFO and
    above with a timestamped format, while the console handler echoes
    everything the logger lets through.

    :param log_path: path of the output log file (written as UTF-8).
    :param logging_name: name for the logger; any string works.
    :return: tuple of (logger, file_handler); keep the handler so it can
        be removed/closed by the caller later.
    """
    logger = logging.getLogger(logging_name)
    logger.setLevel(level=logging.DEBUG)

    # File output: INFO and above, with a timestamped record format.
    file_handler = logging.FileHandler(log_path, encoding='UTF-8')
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(
        logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))

    # Console output: DEBUG and above, default (unformatted) style.
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(logging.DEBUG)

    for each_handler in (file_handler, stream_handler):
        logger.addHandler(each_handler)
    return logger, file_handler




# # Preparation (data handling, result-saving locations)

# ## Dataset locations

# ### Training-set location
# data/train ; data/train2
TrainDataPath = 'data/train2/'


# filename_AB options:
# data/train:(1) 井数据1_20190603_孔隙度_训练.csv; (2) 井数据2_20190718_孔隙度_训练.csv;
# data/train2

# Training CSV (well logs with labeled petrophysical targets).
filename_AB = '井数据2_20190718_训练_section_1and2-train.csv'
TrainDataPath = os.path.join(TrainDataPath,filename_AB)
print(TrainDataPath)

# ### Test-set location for the prediction stage

TestDataPath = 'data/test2/'

# filename_A is the conventional-log data matching the curve to predict
# (1) with core data: 鄂37_井数据2_20190718.csv; 鄂74_井数据2_20190718.csv; 鄂49_井数据2_20190718.csv
# (2) without core data: 鄂37_SX_钟吉彬_20120514_001.csv; 鄂74_SX_高建英_20170731_001.csv; 鄂49_SX_陈阵_20150928_001.csv


filename_A = '22-鄂37_井数据2_20190718.csv'

# Well name for resolution-enhancement output, derived from the file stem.
addR_well_name = filename_A.split(".")[0]
TestDataPath = os.path.join(TestDataPath,filename_A)

# ### Low-resolution dataset location
use_low_R_data = False


# Sample spacing of the low-resolution data points, default 1 | 0.5
step_length = 1


noaddRDataPath = 'data/noaddR/'

# filename_C is the low-resolution petrophysical parameter curve
# filename_C: (1) YT2_SX_1m_shang.csv ; (2) YT2_SX_1m.csv

filename_C = ''
noaddRDataPath = os.path.join(noaddRDataPath,filename_C)


# ### High-resolution dataset location
# Used to validate resolution-enhanced output against high-resolution
# measurements; such measurements may not exist in practice.

use_high_R_data = True  # True | False


# Ratio of high to low resolution; default 10 (0.1 m) or 8 (0.125 m)
resolution = 8

HighRDataPath = 'data/testH/'

# filename_C_H is the high-resolution element curve: (1) YT2_YS_0.1m_shang.csv ; (2) YT2_YS_0.1m_xia.csv

filename_C_H = '井数据2_20190718_训练_section_1and2-train.csv'
# Other options: 井数据2_20190718_训练.csv  鄂37_井数据2_20190718.csv  鄂74_井数据2_20190718.csv
# 鄂49_井数据2_20190718.csv   22-鄂37_井数据2_20190718.csv
# 井数据2_20190718_训练_section_1-train.csv

HighRDataPath = os.path.join(HighRDataPath,filename_C_H)

# # 模型定义
# ## 定义自变量

# Input log dimensions: AC, CNL, DEN, GR, RD/RLLD, RS/RLLS, etc.

# input_vectors = ["AC","CNL","DEN","GR","RD","RS"]
input_vectors = ["AC","CNL","DEN","GR","RLLD","RLLS"]
# Variant with depth as an additional input:
# input_vectors = ["深度", "AC","CNL","DEN","GR","RLLD","RLLS"]
# input_vectors = ["AC","CNL","DEN","GR"]

# ## Target (dependent) variables

# Parameter models to train.
# Element curves such as "Al","Ca","Fe","K","Mg","Na","Si" may also be used:
# element_name = |"Al"|"Ca"|"Fe"|"K"|"Mg"|"Na"|"Si"|

# Target curves: porosity (孔隙度), saturation (饱和度), permeability (渗透率)
element = ["孔隙度","饱和度","渗透率"]
# Reference petrophysical curves computed by formula (log-derived).
reference = ["POR","SW","PERM"]

# Number of multi-task outputs = number of target curves.
tasks_num = len(element)

target_colors = ['#632423', '#0070C0','#00B0F0','#75DAFF','#00B050','#FFC000', '#FFFF00']

# Currently selected target curve (渗透率 = permeability).
element_name =  "渗透率"
# "孔隙度"|"饱和度"|"渗透率"
reference_name = "PERM"
# "POR"|"SW"|"PERM"

# Column holding per-sample weights.
# NOTE(review): "coloum" is a typo for "column"; kept because unseen code
# elsewhere may reference this name.
weight_coloum = "sample_weight"


# Depth column name in the CSVs (深度 = depth).
DEPTH_col_name = "深度"     # "DEPTH"  | 深度


# ## Select the model to use

# Available model_type values (flag groups decided by sms.inference_flag):
# (1)'RBF'(flag = 1);
# (2)'DNN'(flag = 2);
# (3)'LSTM','GRU','GRU2','DNN_2'(flag = 3);
# (4)'BLSTM', 'BGRU'(flag = 3),'MyWaveNet';'BiLSTM_Self_Atten'; 'BiGRU_Self_Atten'; currently the best three
# (5)'WaveNet','MyUNet'
# (6)'BiLSTM_Atten','BiLstm_WaveNet_Atten','C_BiLSTM_Atten'
# (7) (unfinished) 'NAS','BiLSTM_Self_Atten';
# (8)'C_BiLSTM_Atten','WaveNet_BLSTM','BiGRU_Atten','X_BiGRU_Atten','MultiTask', 'PSP_RBF'


# model_type = "RBF"
# RBF, DNN, MyWaveNet(seq4), LSTM(seq4), GRU, BLSTM, BGRU, BiLSTM_Self_Atten

# model_type =   "BiGRU_Self_Atten"     # 'BiGRU_Atten' | "BiGRU_Self_Atten"
# Upgraded variant 1
# model_type =   "C_BiGRU"      #  C_BiGRU 'C_BiGRU_Atten'| "C_BiGRU_Self_Atten"
# Upgraded variant 2
# model_type =   "C_BiGRU_Skip"      #  C_BiGRU 'C_BiGRU_Atten'| "C_BiGRU_Self_Atten"
# Upgraded variant 3
# model_type =    "C_BiGRU_Self_Atten_2"      #'C_BiGRU_Self_Atten_3' 'C_BiGRU_Atten_2' | "C_BiGRU_Self_Atten_2"
# Upgraded variant 4
# model_type = "C_BiGRU_Self_Atten_4"
# Upgraded variant 0
# model_type =  "PSP_RBF"    # 'MultiTask'  | "MultiTask_Self_Atten"  MultiTask_Self_Atten  //PSP_RBF new method
# Alternative variant 1
# model_type = "MultiTask_MultiHeadAttenSeflAttnDNN"   # "MultiTask_MultiHeadAtten" | "MultiTask_MultiHeadAttenWithDNN" | "MultiTask_MultiHeadAttenSeflAttnDNN"
# Alternative variant 1
# model_type = "PSP_DNN"
model_type =  'PSP-BiGRU(Parallel)'  # PSP_RBF_PARALLERL_SIMPLE |  PSP_RBF
                            # PSP_RBF_PARALLERL_SELFATTEN_MODEL  | PSP_RBF_SERIAL_SELFATTEN_MODEL=PSP_RBF
                            # PSP_RBF_SERIAL_2_SELFATTEN_MODEL
                            # C_LSTM_2   |    C_BiLSTM_2   |  C_BiLSTM_Self_Atten_2  | PSP-LSTM-SA(Serial)
                            # PSP-LSTM(Parallel) |  PSP-BiLSTM(Parallel)  |  PSP-BiLSTM-SA(Parallel)   |PSP-LSTM-SA(Parallel)
                            # PSP-GRU-SA(Parallel) | PSP-GRU-SA(Serial)
                            # PSP-BiGRU-SA(Parallel) | PSP-BiGRU-SA(Serial)==C_BiGRU_Self_Atten_2
                            # PSP-GRU(Parallel) | PSP-GRU(Serial)
                            # PSP-BiGRU = C_BiGRU |   PSP-BiGRU(Parallel)



# ## Model-related parameters


print("model_type:",model_type)
# flag groups model types into families that share data preparation
# (sequence building, multi-task handling) — see sms.inference_flag.
flag = sms.inference_flag(model_type)

print("flag:",flag)


# ## Initialize the training-model structural parameters

# The network architecture itself comes from the senmodels module.


# MAX_SAMPLE_NUM = 2000
# Input dimensionality = number of input log curves.
data_dim = len(input_vectors)
seq_length = 20 #16  # sequence length, default 10; TT1:4  J404:8
hidden_dim = 14  # hidden units, default 12, 24, 36, i.e. 2x input dim; for bidirectional nets this is half the per-direction size
# Output dimensionality (1 for single-task, 3 for multi-task).
output_dim = 1  # 1  3
n_layers = 2 # number of LSTM layers, default 4
dropout_rate = 0.2   # applied during training, default 0.2; Na: 0.4

# The parameters below can be changed without altering the network shape.
learning_rate = 0.005   # default 0.01; preferred: 0.005; 0.008; also 0.0008 0.0001 0.001
# batch_size = 100 Na:
BATCH_SIZE = 1000
# iterations = 300
EPOCHS =  30  # 20  30

# input_vectors_dim = len(input_vectors)



# Bundle all hyper-parameters into the project parameter object.
model_para = sms.MyModelParameter(data_dim,seq_length, hidden_dim, output_dim,learning_rate,dropout_rate,n_layers,BATCH_SIZE,EPOCHS)
print("model_para.n_layers:",model_para.n_layers)


# ## Choose between training and testing

# The pipeline has two stages: "train" | "test"


model_stage = "train"
# model_stage = "test"


# Whether to use per-sample weights during training.


train_use_weight = False


# Whether to train the resolution-enhancement model, default: False | True


train_add_R_model = False



# Derive well names from the file stems; in test mode the well name
# comes from the prefix of the test file instead.
if model_stage == "train":
    well_name = filename_AB.split(".")[0]
    train_well_name = filename_AB.split(".")[0]
else:
    well_name = filename_A.split("_")[0]
    train_well_name = filename_AB.split(".")[0]
print(well_name,train_well_name)



# Play music when training finishes. True | False
# NOTE(review): "paly" is a typo for "play"; kept because unseen code may
# reference this name.
paly_music = True # True | False



# Whether to save training logs (viewable live in TensorBoard, but the
# log files consume disk space). default: False | True
save_logs = False
# Where training logs are saved.
log_path = "logs"



# Whether to use the batch-size strategy. default False | True
batch_size_strategy = False # True |False



# Two learning-rate adaptation policies, default = 0:
# (1) every 10 epochs, divide the learning rate by 10: value = 1;
# (2) when learning plateaus, reducing the rate by 2x or 10x often helps: value = 2
# NOTE(review): "deacy" is a typo for "decay"; kept for the same reason.
learning_rate_deacy_policy = 2



# use_semi_seqlength = True

# Whether to plot the network graph. default = False; values: True | False
plot_modelnet = True

# ## 训练阶段模型保存位置


print(model_stage)


# Date tag used in output folder names (from the project helper).
Date = sen.tid_date()
model_child_dir_name = train_well_name + '_Seq_'+ str(seq_length)+ "/"
custom_model_child_dir = "J404_Seq_8_WaveNet/"



# Folder where trained models are saved, one folder per model type.
model_save_path = os.path.join("model/", 'element_' + model_type.lower() + "_train/")
#model_save_path = os.path.join("model/", 'element_' + model_type.lower() + "_train/",model_child_dir_name)
# makedirs(..., exist_ok=True) replaces the original exists()/makedirs()
# if/else: same result, but without the check-then-create race and the
# duplicated assignment in both branches.
os.makedirs(model_save_path, exist_ok=True)
model_path = model_save_path
print(model_path)



# The model file name encodes well, model type, target curve and the main
# hyper-parameters so different runs never overwrite each other.
# (The doubled underscore in "_layers__lr_" is kept for backward
# compatibility with previously saved model files.)
model_name = train_well_name + "_" + model_type.lower() + "_" + element_name + '_'+ str(n_layers) + "_layers_" +  "_lr_" + str(
        learning_rate) + "h_dim" + str(hidden_dim) + "_epoch_" + str(EPOCHS) + ".h5"
model_file = model_path + model_name
print("model_name:",model_name)
print("model_file:",model_file)



# JSON file holding the model architecture (the weights go to the .h5 file).
json_name = train_well_name + "_" + model_type.lower() + "_" + element_name + '_'+ str(n_layers) + "_layers_" +  "_lr_" + str(
        learning_rate) + "h_dim" + str(hidden_dim) + "_epoch_" + str(EPOCHS) + ".json"
model_json = model_path + json_name
print(model_json)


# ## Location of the model to load for testing


if model_stage == "test":
    # Load the model trained with exactly the current hyper-parameters.
    pred_model_json = model_json
    pred_model_file = model_file
#     custom_model_json =  "AB-J404-3961m-4240m_blstm_"+ element_name + "_4_layers__lr_0.005h_dim36_epoch_40.json"
#     custom_model_file =  "AB-J404-3961m-4240m_blstm_"+ element_name + "_4_layers__lr_0.005h_dim36_epoch_40.h5"

#     pred_model_json = os.path.join(model_path,custom_model_json)
#     pred_model_file = os.path.join(model_path,custom_model_file)

    # Abort early if the trained model files are missing.
    if not (os.path.exists(pred_model_json) and os.path.exists(pred_model_file)):
        print("预测模型不存在，程序结束，请训练相应模型")
        exit()

# ## Locations for result figures and text output
if model_stage == "train":
    # well_name = filename_AB.split("_")[0]
    # begin_depth = depth_log[0][0]
    # end_depth = depth_log[-1][0]

    training_img_file_saving_path = 'model_training_images/'
    child_dir_name = train_well_name + model_type.lower() + '_Seq_'+ str(seq_length)+ "_" + Date + "/"
    model_training_img_file_saving_path = os.path.join(training_img_file_saving_path,child_dir_name)
    model_training_img_name =  model_type + "_" + well_name + "_"+ element_name

    if not os.path.exists(model_training_img_file_saving_path):
        os.makedirs(model_training_img_file_saving_path)

if model_stage == "test":

    model_testing_image_name =  model_type + "_" + well_name + "_" + element_name
    child_dir_name = well_name + model_type.lower() + '_Seq_'+ str(seq_length)+ "_" + Date + "/"
    testing_img_file_saving_path = 'model_testing_images/'
    model_testing_img_file_saving_path = os.path.join(testing_img_file_saving_path,child_dir_name)
    if not os.path.exists(model_testing_img_file_saving_path):
        os.makedirs(model_testing_img_file_saving_path)

# Matplotlib text style used by some annotations below.
font={'family':'SimHei',
     'style':'italic',
    'weight':'normal',
      'color':'red',
      'size':16
}

# CSV results folder; the first assignment is overwritten by the
# stage-specific path chosen just below.
csv_file_saving_path = "PaperHistory/noDepthExperimentHistory/csv_results_train_20230829/"
if model_stage == "test":
    csv_file_saving_path = os.path.join("csv_results_train_20230829/", model_type.lower() + "_test/")
else:
    csv_file_saving_path = os.path.join("csv_results_train_20230829/", model_type.lower() + "_train/")
if not os.path.exists(csv_file_saving_path):
    os.makedirs(csv_file_saving_path)
print(csv_file_saving_path)


# # Data loading and processing

# pandas read_csv() defaults to the C parser engine, which can fail on file
# names containing Chinese characters; specifying engine='python' avoids that.
if model_stage == "train":
    AB_use = pd.read_csv(TrainDataPath,engine='python',encoding='GBK',sep=",")
    # Drop rows with missing values, then force the input curves to numeric.
    AB_use  = AB_use.dropna()
    for item in (input_vectors):
        AB_use[item] = pd.to_numeric(AB_use[item])
    print("Training data loading....")
else:
    A_read = pd.read_csv(TestDataPath,engine='python',encoding='GBK',sep=",")
    A_read  = A_read.dropna()
    print("Testing data loading....")

# for item in (input_vectors):
#     AB_use[item] = pd.to_numeric(AB_use[item])



# Quick sanity check of whichever table was loaded for this stage:
# column count plus the dtype/non-null summary from DataFrame.info().
_active_df = AB_use if model_stage == "train" else A_read
print(len(_active_df.columns))
print(_active_df.info())

# Extract the depth column (if present) and plot the target curve vs depth.
use_depth_log = False
if model_stage == "train":
    # Plot the curves over the training interval.
    Y_ele = AB_use.loc[:, element]
    if DEPTH_col_name in AB_use.columns.values:
        use_depth_log = True
        depth_log = AB_use.loc[:, [DEPTH_col_name]]
        depth_log = np.array(depth_log)
        # Flattened 1-D copy of the depth track for later use.
        cucao_depth = np.array(depth_log)
        cucao_depth.shape = (len(cucao_depth),)
        begin_depth = depth_log[0][0]
        end_depth = depth_log[-1][0]
        print("begin_depth",begin_depth,"end_depth",end_depth)
        plt.figure(figsize=(16,16))
        plt.title("element_log")
        plt.plot(Y_ele[element_name],depth_log,color="red", label=element_name)
        plt.xlabel(element_name )
        plt.ylabel(DEPTH_col_name + "(m)")
        plt.legend(loc='best')
        plt.grid(True)
        # plt.show()
    else:
        # No depth column: plot against the sample index instead.
        plt.title("输入维度")
        sample_index = np.arange(len(AB_use))
        if flag == 1 or flag ==2:
            plt.plot(Y_ele[element_name],sample_index, label=element)
        else:
            for i in range(len(element)):
                plt.plot(Y_ele[element[i]],sample_index, label=element[i])
        plt.xlabel("归一化后值")
        plt.ylabel("样本点编号")
        plt.legend(loc='upper right')
        plt.grid(True)
        print("No Depth Information")
else:
    if DEPTH_col_name in A_read.columns.values:
        use_depth_log = True
        depth_log = A_read.loc[:, [DEPTH_col_name]]
        depth_log = np.array(depth_log)
        cucao_depth = np.array(depth_log)
        cucao_depth.shape = (len(cucao_depth),)
        # Without low-resolution data, the first seq_length samples are
        # consumed by sequence building, so the usable interval starts later.
        if use_low_R_data == True:
            begin_depth = depth_log[0][0]
        else:
            begin_depth = depth_log[seq_length][0]
        end_depth = depth_log[-1][0]
        print("begin_depth",begin_depth,"end_depth",end_depth)
    else:
        print("No Depth Information,all method is end!!!")
        # exit()
# NOTE(review): if no depth column exists, depth_log is never assigned and
# this print raises NameError — confirm the input files always have depth.
print("depth_log.shape:",depth_log.shape)


# ## Assemble the independent/dependent variable tables

# (Resistivity curves are log-transformed in the next section.)


if model_stage == "train":
    # inputY: core-measured targets; inputY_calc: formula-derived references.
    inputY = AB_use.loc[:, element]
    inputY_calc = AB_use.loc[:, reference]
    inputX = AB_use.loc[:,input_vectors]
    print(inputY)
    print(inputY_calc)
    if train_use_weight == False:
        # Unweighted training: every sample gets weight 1.
        sample_weight = np.ones(len(inputY))
        # print(AB_Y)
    else:
        # Use the per-sample weight column from the training table.
        sample_weight = AB_use.loc[:, weight_coloum]
else:
    inputX = A_read.loc[:,input_vectors]


# ### 电阻率取对数


# Resistivity curves are used in log10 space; clamp values to >= 1 first so
# the logarithm is finite and non-negative (same values as the original
# per-row loop that set every element <= 1 to exactly 1).
electric_log = ["RD","RS","RLLD","RLLS"]
for item in electric_log:
    if item in input_vectors:
        print(item)
        # Vectorized replacement for the original per-element loop: the old
        # chained assignment inputX[item][i] = 1 triggers pandas
        # SettingWithCopy behavior (the write can silently miss the frame)
        # and iterates in interpreted Python. clip(lower=1) is one C-level
        # pass and always writes through.
        inputX[item] = np.log10(inputX[item].clip(lower=1))


# ### 线性变换渗透率范围


# Offset added after log10 so the transformed permeability stays positive.
constant_value = 3

if model_stage == "train":
    # Permeability spans orders of magnitude, so it is trained in log10
    # space (shifted by constant_value); multi-task flags 4/5 always
    # include permeability, hence the extra conditions.
    if element_name == "渗透率" or flag == 4 or flag == 5:
        # transform_col_1 = "渗透率"
        # transform_col_2 = "PERM"
        inputY.loc[:,element_name] = np.log10(inputY.loc[:,element_name]) + constant_value
        inputY_calc.loc[:,reference_name] = np.log10(inputY_calc.loc[:,reference_name]) + constant_value
        print("渗透率需要进行变换")

# ## 归一化操作
# 设置输入曲线范围,由于取了对数，所以RD和RS设置下限-1
# if element_name == "孔隙度" or "饱和度":
# Min/max normalization ranges for each input log curve.  The resistivity
# curves were log10-transformed above, so their bounds are in log10 units.
AC = [180, 420]    # previously [140,350]
CNL = [0, 90]      # previously [-0.8,80]
DEN = [1, 3]
GR = [0, 220]      # previously [0,350]
RD = [0, 5]
RS = [0, 5]
RLLD = [0, 5]
RLLS = [0, 5]

DEPTH = [2500, 4500]


# Petrophysical parameters computed from wireline logs.
POR_CALC = [0, 25]
SW_CALC = [0, 100]
# PERM_CALC = [-3 + constant_value, 2 + constant_value]
PERM_CALC = [-3 + constant_value, 1 + constant_value]

# Petrophysical parameters measured by rock-physics experiments.
POR = [0, 25]
SW = [0, 100]
# PERM = [-3 + constant_value, 2 + constant_value]
PERM = [-3 + constant_value, 1 + constant_value]

# Lookup tables mapping curve names to their normalization ranges; the
# selection below follows whatever is in input_vectors/element/reference.
u_log = {
    "深度": DEPTH,
    "AC": AC,
    "CNL": CNL,
    "DEN": DEN,
    "GR": GR,
    "RD": RD,
    "RS": RS,
    "RLLD": RLLD,
    "RLLS": RLLS,
}
e_log = {"孔隙度": POR, "饱和度": SW, "渗透率": PERM}
e_CALC_log = {"POR": POR_CALC, "SW": SW_CALC, "PERM": PERM_CALC}

# Ranges for the selected input curves, in input_vectors order.
u_log_name = [u_log[curve] for curve in input_vectors]
print("u_log_name:",u_log_name)

# Ranges for the target curves and the computed reference curves.
e_log_name = [e_log[curve] for curve in element]

e_calc_log_name = [e_CALC_log[curve] for curve in reference]



def zero_one_scaler(data, log_name):
    """Min-max normalize each column of *data* to [0, 1].

    Column i is first clipped to the closed interval log_name[i] = [lo, hi],
    then mapped through (x - lo) / (hi - lo + 1e-8).  The small epsilon
    keeps the division finite when hi == lo.

    :param data: pandas DataFrame whose first len(log_name) columns are
        numeric curves.
    :param log_name: sequence of [lo, hi] pairs, one per column.
    :return: a new DataFrame of the same shape; the input is not modified.
    """
    result = data.copy()
    for i, (lo, hi) in enumerate(log_name):
        # Vectorized clip replaces the original per-sample Python loop:
        # identical output values, one C-level pass instead of O(n)
        # interpreted iterations per column.
        clipped = np.clip(np.asarray(data.iloc[:, i], dtype=float), lo, hi)
        result.iloc[:, i] = (clipped - lo) / (hi - lo + 1e-8)

    return result



# Normalize inputs and (in training) targets to [0, 1].
# NOTE(review): this uses sen.zero_one_scaler from the project module, not
# the local zero_one_scaler defined just above — confirm they agree.
AB_G = sen.zero_one_scaler(inputX,u_log_name)
print(AB_G.shape)

if model_stage == "train":
    inputY_train = sen.zero_one_scaler(inputY,e_log_name)
    inputY_calc_train = sen.zero_one_scaler(inputY_calc,e_calc_log_name)
    # Multi-task families (flags 4/5) keep all targets; otherwise only the
    # currently selected target/reference column is used.
    if flag == 4 or flag == 5:
        AB_Y_G = inputY_train.loc[:,element]
        AB_Y_calc_G = inputY_calc_train.loc[:,reference]
    else:
        AB_Y_G = inputY_train.loc[:,[element_name]]
        AB_Y_calc_G = inputY_calc_train.loc[:,[reference_name]]
    print(AB_Y_G.shape)
    print(AB_Y_calc_G.shape)
print("------------------zero_one_scaler is finished!----------------------------")



# print(AB_G)
# AB_Y_G.loc[:, [element_name]]
# Build moving-average-smoothed copies of the normalized target curve(s)
# (window 5, via the project helper sen.fast_moving_average).
if model_stage == "train":
    # if model_type == 'C_BiLSTM_Atten':
    if flag == 4 or flag == 5:
        # Multi-task: smooth each target curve separately into a list.
        AB_Y_G_Smooth = []
        for i in range(len(element)):
            cu = AB_Y_G.loc[:, element[i]]
            cucao = np.array(cu)
            print(type(cucao),cucao.shape)
            cucao.shape = (len(cucao),)
            AB_Y_G_S = sen.fast_moving_average(cucao,5)
            AB_Y_G_Smooth.append(AB_Y_G_S)
    else:
        # Single-task: AB_Y_G_Smooth is a single 1-D array here.
        cu = AB_Y_G.loc[:, [element_name]]
        cucao = np.array(cu)
        print(type(cucao),cucao.shape)
        cucao.shape = (len(cucao),)
        AB_Y_G_Smooth = sen.fast_moving_average(cucao,5)
    



# AB_Y_G[element[0]]



# Plot normalized inputs, targets and computed references vs sample index,
# then save the figure under the stage-specific images folder.
plt.figure(figsize=(15,20))

plt.subplot(131)
plt.title("输入维度")
sample_index = np.arange(len(AB_G))
for i in range(len(input_vectors)):
    plt.plot(AB_G[input_vectors[i]],sample_index, label=input_vectors[i])
plt.xlabel("归一化后值")
plt.ylabel("样本点编号")
plt.legend(loc='upper right')
plt.grid(True)

if model_stage == "train":
    plt.subplot(132)
    plt.title("目标维度")
    # if model_type == 'C_BiLSTM_Atten':
    if flag == 4 or flag == 5:
        for i in range(len(element)):
            plt.plot(AB_Y_G[element[i]],sample_index, label = element[i])
    else:
        plt.plot(AB_Y_G,sample_index, label = element_name)
    plt.xlabel("归一化后值")
    plt.ylabel("样本点编号")
    plt.legend(loc='upper right')
    plt.grid(True)

    plt.subplot(133)
    plt.title("计算的目标维度")
    # if model_type == 'C_BiLSTM_Atten':
    if flag == 4 or flag == 5:
        for i in range(len(reference)):
            plt.plot(AB_Y_calc_G[reference[i]],sample_index, label = reference[i] + "_clac")
    else:
        plt.plot(AB_Y_calc_G,sample_index, label = reference_name + "_clac")
    plt.xlabel("归一化后值")
    plt.ylabel("样本点编号")
    plt.legend(loc='upper right')
    plt.grid(True)
    plt.savefig(model_training_img_file_saving_path + model_training_img_name +"_" + element_name +  '_'+ str(n_layers) + "_layers_" +  "_lr_" + str(
            learning_rate) + "h_dim" + str(hidden_dim) + "_epoch_" + str(EPOCHS)+ '_tendency.png', dpi=96,  bbox_inches='tight')

# In test mode only the first subplot was drawn; the figure is still saved.
if model_stage == "test":
    plt.savefig(model_testing_img_file_saving_path + model_testing_image_name +"_" + element_name  + '_'+ str(n_layers) + "_layers_" +  "_lr_" + str(
            learning_rate) + "h_dim" + str(hidden_dim) + "_epoch_" + str(EPOCHS)+ '_tendency.png', dpi=96,  bbox_inches='tight')
# plt.show()



# Crossplot of computed reference values vs core-measured target values
# (both normalized to [0, 1]); one panel per task in multi-task mode.
if model_stage == "train":
    # Index of the currently selected target within the element list.
    element_flag = element.index(element_name)
    # if model_type == 'C_BiLSTM_Atten':
    if flag == 4 or flag == 5:
        plt.figure(figsize=(12, 4))
        for i in range(len(element)):
            # Subplot position string e.g. "131", "132", "133".
            fig_no = "13" + str(i+1)
            plt.subplot(int(fig_no))
            plt.title(str(element[i])+ " vs " + str(reference[i]) + " relationship")
            plt.scatter(AB_Y_calc_G[reference[i]],AB_Y_G[element[i]])
            plt.xlabel(reference[i])
            plt.ylabel(element[i])
            plt.xlim(-0.05,1.05)
            plt.ylim(-0.05,1.05)
            plt.grid(True)
    else:
        plt.figure(figsize=(8, 8))
        plt.title("relationship")
        plt.scatter(AB_Y_calc_G,AB_Y_G)
        plt.xlabel(reference[element_flag])
        plt.ylabel(element[element_flag])
        plt.xlim(-0.05,1.05)
        plt.ylim(-0.05,1.05)
        plt.grid(True)
    # plt.show()
        # plt.savefig(model_testing_img_file_saving_path + model_testing_image_name + 'ValAll.jpg', dpi=220,  bbox_inches='tight')

    #     print("You Input a wrong Target Parameter!")







# Histogram / density plot of the target value distribution(s).
# NOTE(review): sns.distplot is deprecated in modern seaborn (replaced by
# histplot/displot) — fine for the pinned version, but will break on upgrade.
if model_stage == "train":
    # Index of the currently selected target within the element list.
    element_flag = element.index(element_name)
    # if model_type == 'C_BiLSTM_Atten':
    if flag == 4 or flag == 5:
        plt.figure(figsize=(12, 4))
        for i in range(len(element)):
            fig_no = "13" + str(i+1)
            plt.subplot(int(fig_no))
            plt.title(str(element[i])+ " distribution")
            sns.distplot(inputY[element[i]],label = element[i], kde=False,hist=True,color=target_colors[i])
            plt.grid(True)
            plt.legend(loc='best')
    else:
        plt.figure(figsize=(8, 4))
        plt.title("distribution")
        sns.distplot(AB_Y_G,label = element_name, kde=True,hist=True,color='fuchsia')
        plt.grid(True)
        plt.legend(loc='best')
    # plt.show()


# ## 输入自变量数据准备


# Convert the normalized DataFrames into plain NumPy arrays for modeling.
AB_X = np.array(AB_G)
if model_stage == "train":
    AB_Y, AB_Y_calc = np.array(AB_Y_G), np.array(AB_Y_calc_G)
    print("AB_Y.shape:",AB_Y.shape)



# AB_X


# ### 训练集验证集划分

# 根据模型需要判定是否需要序列化


# Build the training/validation datasets.  Sequence models (flag 3/4) need
# sliding-window sequence construction first; point-wise models (flag 1/2)
# and flag-5 models use the samples directly.
if (flag == 1) or (flag == 2):
    print("不需要序列化")
    if model_stage == "train":
        # Training stage: point-wise samples, no sequence building.
        dataX = AB_X
        dataY = AB_Y
        dataY_calc = AB_Y_calc
        # NOTE(review): the loop below reassigns on every iteration, so only
        # the LAST of the 10 shuffle splits is kept — confirm intentional.
        sss = model_selection.ShuffleSplit(n_splits=10, test_size=0.2, random_state=0)
        for train_index, test_index in sss.split(dataX):
        # print("TRAIN:", train_index, "TEST:", test_index)
            train_X, test_X = dataX[train_index], dataX[test_index]
            train_Y, test_Y = dataY[train_index], dataY[test_index]
            train_Y_calc, test_Y_calc = dataY_calc[train_index], dataY_calc[test_index]
            train_weight, test_weight = sample_weight[train_index],sample_weight[test_index]
    else:
        # Testing stage: use all samples for prediction.
        testALL_A_X = AB_X
    if use_depth_log == True:
        DEPTH_AddReslution = depth_log
    else:
        DEPTH_AddReslution = None
else:

    if model_stage == "train":
#         if (flag == 1) or (flag == 2):
#             print("不需要序列化")
#             dataX = AB_X
#             dataY = AB_Y
#             dataY_calc = AB_Y_calc
#         else:
        if  flag == 5:
            print("不需要序列化")
            dataX, dataY = AB_X, AB_Y
            dataY_calc = AB_Y_calc
            weight_matrix = sample_weight
        else:
            print("序列化")
            # Build overlapping length-seq_length windows for X, and the
            # matching targets/weights, via the project helpers.
            dataX, dataY = sen.build_All_Train_dataset(AB_X, AB_Y, seq_length)
            dataY_calc = sen.build_All_Y_dataset( AB_Y_calc, seq_length)
            weight_matrix = sen.build_All_Y_dataset(sample_weight,seq_length)
            # Draw train/validation subsets with model_selection.ShuffleSplit.
        # NOTE(review): as above, only the LAST of the 10 splits survives.
        sss = model_selection.ShuffleSplit(n_splits = 10, test_size=0.2, random_state=0)
        for train_index, test_index in sss.split(dataX):
        # print("TRAIN:", train_index, "TEST:", test_index)
            trainX, testX = dataX[train_index], dataX[test_index]
            trainY, testY = dataY[train_index], dataY[test_index]
            train_Y_calc, test_Y_calc = dataY_calc[train_index], dataY_calc[test_index]
            train_weight, test_weight = weight_matrix[train_index],weight_matrix[test_index]

    else:
        # Testing stage: sequence-build the full input set.
        testALL_A_X = sen.build_All_A_dataset(AB_X, seq_length)
    # Depth track aligned with the (possibly sequence-shortened) samples.
    if use_depth_log == True and flag != 5:
        DEPTH_AddReslution = sen.build_addReslution_DEPTH(depth_log, seq_length)
    elif flag == 5:
        DEPTH_AddReslution = depth_log
    else:
        DEPTH_AddReslution = None

# ### 确认输入维度

# ### Confirm the dataset shapes before training/testing.
# Point-wise models use train_X/test_X; sequence models use trainX/testX.
if (flag == 1) or (flag == 2):
    if model_stage == "train":
        print("input_vectors.length:",len(input_vectors))
        print("train_X.shape:", train_X.shape,"test_X.shape:", test_X.shape)
        print("train_Y.shape:", train_Y.shape,"test_Y.shape:", test_Y.shape)
        print("train_Y_calc.shape:", train_Y_calc.shape,"test_Y_calc.shape:", test_Y_calc.shape)
        print("train_weight.shape:", train_weight.shape,"test_weight.shape:", test_weight.shape)
    else:
        print("testALL_A_X.shape:", testALL_A_X.shape,"\n","input_vectors.length:",len(input_vectors))
else:
    if model_stage == "train":
        print("input_vectors.length:",len(input_vectors))
        print("trainX.shape:", trainX.shape,"testX.shape:", testX.shape) 
        print("trainY.shape:", trainY.shape,"testY.shape:", testY.shape)
        print("train_Y_calc.shape:", train_Y_calc.shape,"test_Y_calc.shape:", test_Y_calc.shape)
        print("train_weight.shape:", train_weight.shape,"test_weight.shape:", test_weight.shape)
    else:
        print("testALL_A_X.shape:", testALL_A_X.shape,"\n","input_vectors.length:",len(input_vectors))







# Baseline check: how well the formula-computed reference values agree with
# the measured targets on the validation split (RMSE, sqrt of MAE, OLS fit).
# NOTE(review): "RMAE" here is literally sqrt(mean_absolute_error), an
# unusual metric — confirm that is the intended definition.
if flag != 4 and flag != 5:
    if (model_stage == "train"):
        if flag == 1 or flag ==2:
            rmse_val = np.sqrt(mean_squared_error(test_Y, test_Y_calc))
            rmae_val = np.sqrt(mean_absolute_error(test_Y, test_Y_calc)) 
            ols_val = sm.OLS(test_Y, test_Y_calc).fit()
        else:
            rmse_val = np.sqrt(mean_squared_error(testY, test_Y_calc))
            rmae_val = np.sqrt(mean_absolute_error(testY, test_Y_calc)) 
            ols_val = sm.OLS(testY, test_Y_calc).fit()
        print('计算值与真实标定值线性相关性分析')
        print(ols_val.summary())
        print('Test RMSE_Val: %.4f' % rmse_val)
        print('Test RMAE_Val: %.4f' % rmae_val)
else:
    # Multi-task case: evaluate each target column separately.
    if (model_stage == "train"):
        for i in range(0,len(element)):
            print(i)
            use_x = testY[:,i]
            use_y = test_Y_calc[:,i]
            rmse_val = np.sqrt(mean_squared_error(use_x,use_y ))
            rmae_val = np.sqrt(mean_absolute_error(use_x, use_y)) 
            ols0_val = sm.OLS(use_x, use_y).fit()
            print('计算值与真实标定值线性相关性分析——',element[i])
            print(ols0_val.summary())
            print('Test RMSE_Val: %.4f' % rmse_val)
            print('Test RMAE_Val: %.4f' % rmae_val)

if model_stage == "train":
    print(train_weight)


# # 网络实例化

# ## 构建网络或载入模型


def model_type_select(model_type):
    """Build and return the network matching `model_type`.

    Constructors are stored as lambdas so the lookup table is cheap to
    build and only the selected architecture is instantiated. Globals
    (`model_para`, `tasks_num`, `train_X`, `trainX`) are read at call
    time, exactly as the original if/elif chain did.

    Returns the constructed Keras model, or None (after printing a
    warning) when `model_type` is unknown.
    """
    # NOTE: the old chain assigned an unused `n_layers = 3` in the 'RBF'
    # branch; that dead local has been dropped.
    builders = {
        'DNN': lambda: sms.dnn_model(model_para),
        'DNN_2': lambda: sms.dnn_model_2(model_para),
        'RBF': lambda: sms.rbf_model(train_X,model_para),
        'LSTM': lambda: sms.lstm_cell_model(model_para),
        'GRU': lambda: sms.gru_cell_model(model_para),
        'GRU2': lambda: sms.gru_block_cell_2(model_para),
        # 'NAS': lambda: nas_cell(),
        'BLSTM': lambda: sms.bi_lstm_cell_model(model_para),
        'BGRU': lambda: sms.bi_gru_cell_model(model_para),
        'WaveNet': lambda: sms.wavenet_model(model_para),
        'MyWaveNet': lambda: sms.wavenet_model2(model_para),
        'MyUNet': lambda: sms.my_unet(model_para),
        'Capsule': lambda: sms.bilstm_capsule_model(model_para),
        'WaveNet_BLSTM': lambda: sms.bilstm_wavenet_atten_model(model_para),
        'C_BiLSTM_Atten': lambda: sms.C_bilstm_atten_2_model(model_para,tasks_num),
        'C_BiGRU': lambda: sms.C_bigru_model(model_para,tasks_num),
        'C_BiGRU_Skip': lambda: sms.C_bigru_skip_model(model_para,tasks_num),
        'BiLSTM_Self_Atten': lambda: sms.bilstm_self_atten_model(model_para),
        'BiGRU_Atten': lambda: sms.bigru_atten_model(model_para),
        'BiGRU_Self_Atten': lambda: sms.bigru_self_atten_model(model_para),
        'C_BiLSTM_Self_Atten_2': lambda: sms.C_bilstm_self_atten_2_model(model_para,tasks_num),
        'C_BiLSTM_2': lambda: sms.C_bilstm_2_model(model_para,tasks_num),
        'C_LSTM_2': lambda: sms.C_lstm_2_model(model_para,tasks_num),
        'PSP-BiLSTM-SA(Parallel)': lambda: sms.bilstm_pallerl_selfatten_model(model_para,tasks_num),
        'PSP-BiLSTM(Parallel)': lambda: sms.bilstm_pallerl_model(model_para,tasks_num),
        'PSP-BiGRU(Parallel)': lambda: sms.bigru_pallerl_model(model_para,tasks_num),
        'PSP-LSTM(Parallel)': lambda: sms.lstm_pallerl_model(model_para,tasks_num),
        'PSP-LSTM-SA(Serial)': lambda: sms.C_lstm_self_atten_2_model(model_para,tasks_num),
        'PSP-LSTM-SA(Parallel)': lambda: sms.lstm_pallerl_selfatten_model(model_para,tasks_num),
        'PSP-GRU-SA(Parallel)': lambda: sms.gru_pallerl_selfatten_model(model_para,tasks_num),
        'PSP-GRU-SA(Serial)': lambda: sms.C_gru_self_atten_2_model(model_para,tasks_num),
        'PSP-GRU(Parallel)': lambda: sms.gru_pallerl_model(model_para,tasks_num),
        'PSP-GRU(Serial)': lambda: sms.C_gru_2_model(model_para,tasks_num),
        'PSP-BiGRU-SA(Parallel)': lambda: sms.bigru_pallerl_selfatten_model(model_para,tasks_num),
        'C_BiGRU_Atten': lambda: sms.C_bigru_atten_model(model_para,tasks_num),
        'C_BiGRU_Self_Atten': lambda: sms.C_bigru_self_atten_model(model_para,tasks_num),
        'C_BiGRU_Atten_2': lambda: sms.C_bigru_atten_2_model(model_para,tasks_num),
        'C_BiGRU_Self_Atten_2': lambda: sms.C_bigru_self_atten_2_model(model_para,tasks_num),
        'C_BiGRU_Self_Atten_3': lambda: sms.C_bigru_self_atten_3_model(model_para,tasks_num),
        'C_BiGRU_Self_Atten_4': lambda: sms.C_bigru_self_atten_4_model(model_para,tasks_num),
        'X_BiGRU_Atten': lambda: sms.X_bigru_atten_model(model_para,tasks_num),
        'MultiTask': lambda: sms.multi_task_model(model_para,tasks_num),
        'MultiTask_Self_Atten': lambda: sms.multi_task_self_atten_model(model_para,tasks_num),
        'MultiTask_MultiHeadAtten': lambda: sms.multi_task_multihead_atten_model(model_para,tasks_num),
        'MultiTask_MultiHeadAttenWithDNN': lambda: sms.multi_task_multihead_atten(model_para,tasks_num),
        'MultiTask_MultiHeadAttenSeflAttnDNN': lambda: sms.multi_task_multihead_selfatten_dnn_model(model_para,tasks_num),
        'PSP_RBF': lambda: sms.psp_rbf_net(trainX, model_para, tasks_num),
        'PSP_DNN': lambda: sms.psp_dnn_net_seq(model_para, tasks_num),
        'PSP_DNN_PARALLERL_SIMPLE': lambda: sms.psp_dnn_net_pallerl_simple(model_para, tasks_num),
        'PSP_RBF_PARALLERL_SIMPLE': lambda: sms.psp_rbf_net_pallerl_simple(model_para, tasks_num),
        'PSP_RBF_PARALLERL_MULTIHEAD': lambda: sms.psp_rbf_net_pallerl_multihead(trainX, model_para, tasks_num),
        'PSP_RBF_PARALLERL_SELFATTEN_MODEL': lambda: sms.psp_rbf_net_pallerl_selfatten_model(model_para, tasks_num),
        'PSP_RBF_SERIAL_SELFATTEN_MODEL': lambda: sms.psp_rbf_net_serial_selfatten_model(model_para, tasks_num),
        'PSP_RBF_SERIAL_2_SELFATTEN_MODEL': lambda: sms.psp_rbf_net_serial_2_selfatten_model(model_para, tasks_num),
        'PSP_PARALLERL_SELFATTEN_MODEL': lambda: sms.psp_pallerl_selfatten_model(model_para, tasks_num),
    }
    builder = builders.get(model_type)
    if builder is None:
        # Unknown model type: warn and return None, matching the original
        # chain's implicit-None fall-through.
        print("------------------input correct model type!----------------------------")
        return None
    return builder()


if model_stage == "test":
    print(pred_model_file)

# ## 模型可视化
model = tf.keras.Model()
if model_stage == "train":
    model =  model_type_select(model_type)
else:
    with open(pred_model_json, "r") as json_file_1:
        json_config_1 = json_file_1.read()
        # 此处加载模型无需判断 
        model = tf.keras.models.model_from_json(json_config_1,custom_objects={'GlorotUniform': tf.keras.initializers.GlorotUniform(),
             'Zeros': tf.keras.initializers.Zeros(),
             'RBFLayer': sms.RBFLayer,
             'AttentionLayer': sms.AttentionLayer,
             'My_Attention_layer': sms.My_Attention_layer,
             'MultiHeadAttention':sms.MultiHeadAttention})
        model.load_weights(pred_model_file)
print(model.summary())



# 定义结果打印函数
class PrintDot(tf.keras.callbacks.Callback):
    """Minimal progress callback: prints a marker every 10 epochs."""

    def on_epoch_end(self, epoch, logs):
        if epoch % 10 == 0:
            # BUG FIX: `epoch` is an int — the original concatenated it
            # directly to a str, raising TypeError the first time this
            # branch ran. Convert explicitly before concatenation.
            print('已经训练完' + str(epoch) + 'Epoch')
            print('.', end='')



#if  pg.find_graphviz() is not None:
# Choose where the network-architecture diagram is written: training runs
# save it next to the model, test runs save it with the CSV outputs.
if model_stage == "train":
    model_image_path = model_path
else:
    model_image_path = csv_file_saving_path
if plot_modelnet == True:
    # Requires graphviz/pydot. The file name encodes the hyperparameters
    # (layer count, learning rate, hidden dim) so diagrams from different
    # runs do not overwrite each other.
    tf.keras.utils.plot_model(model,to_file= model_image_path + element_name + '_' + model_type +   '_'+ str(n_layers) + "_layers_" +  "_lr_" + str(
        learning_rate) + "h_dim" + str(hidden_dim) +  '_net.png',
               show_shapes=True,
               show_layer_names=True,
               rankdir='TB',
               expand_nested=False,
               dpi=96)


# ## 日志保存内容设定


# Timestamp for uniquely naming this run's artifacts.
# NOTE(review): `datetime` is not imported explicitly at the top of the
# file; it presumably arrives via `from pylab import *` — confirm.
current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")

if not os.path.exists(log_path):
    # First run: the base log directory does not exist yet.
    os.makedirs(log_path)
# Per-model-type subdirectory, e.g. <log_path>/dnn, <log_path>/lstm, ...
my_log_dir = os.path.join(log_path,model_type.lower())
print("my_log_dir:",my_log_dir)
if not os.path.exists(my_log_dir):
    os.makedirs(my_log_dir)

# Derive a unique log name from the model file name plus a Unix timestamp.
str_name = model_name.split(".h5")[0]
log_name = str_name + "-{}".format(int(time.time()))
# NOTE(review): the literal backslash makes this path Windows-specific.
print(my_log_dir + '\{}'.format(log_name))

# Set up the logging object (writes into the run's log directory).
# NOTE(review): logger_config is not defined in this chunk — presumably a
# project helper brought in elsewhere; verify it returns (logger, handler).
log_file_name = f'{log_name}_log.txt'
log_sava_path = os.path.join(my_log_dir,log_file_name)
logger, handler = logger_config(log_path=log_sava_path, logging_name=log_name)



# Callback set used when save_logs is enabled: TensorBoard traces,
# best-only checkpointing, and early stopping after 5 stagnant epochs.
my_callbacks = [
    tf.keras.callbacks.TensorBoard(log_dir =  my_log_dir + '\{}'.format(log_name)),
    tf.keras.callbacks.ModelCheckpoint(model_file,
                                save_best_only=True),
    tf.keras.callbacks.EarlyStopping(patience=5,mode='auto', min_delta=1e-9),
]


# tensorboard = TensorBoard(log_dir = my_log_dir + '\{}'.format(log_name))


# # 模型训练与测试

# ## 模型训练与验证

# fit函数解析 
# ```
# fit( x, y, batch_size=32, epochs=10, verbose=1, callbacks=None,
# validation_split=0.0, validation_data=None, shuffle=True, 
# class_weight=None, sample_weight=None, initial_epoch=0)
# ```

# * x：输入数据。如果模型只有一个输入，那么x的类型是numpy array，如果模型有多个输入，那么x的类型应当为list，list的元素是对应于各个输入的numpy array  
# * y：标签，numpy array  
# * batch_size：整数，指定进行梯度下降时每个batch包含的样本数。训练时一个batch的样本会被计算一次梯度下降，使目标函数优化一步。  
# * epochs：整数，训练终止时的epoch值，训练将在达到该epoch值时停止，当没有设置initial_epoch时，它就是训练的总轮数，否则训练的总轮数为epochs - inital_epoch  
# * verbose：日志显示，0为不在标准输出流输出日志信息，1为输出进度条记录，2为每个epoch输出一行记录  
# * callbacks：list，其中的元素是keras.callbacks.Callback的对象。这个list中的回调函数将会在训练过程中的适当时机被调用，参考回调函数  
# * validation_split：0~1之间的浮点数，用来指定训练集的一定比例数据作为验证集。验证集将不参与训练，并在每个epoch结束后测试的模型的指标，如损失函数、精确度等。注意，validation_split的划分在shuffle之前，因此如果你的数据本身是有序的，需要先手工打乱再指定validation_split，否则可能会出现验证集样本不均匀。  
# * validation_data：形式为（X，y）的tuple，是指定的验证集。此参数将覆盖validation_spilt。
# * shuffle：布尔值或字符串，一般为布尔值，表示是否在训练过程中随机打乱输入样本的顺序。若为字符串“batch”，则是用来处理HDF5数据的特殊情况，它将在batch内部将数据打乱。  
# * class_weight：字典，将不同的类别映射为不同的权值，该参数用来在训练过程中调整损失函数（只能用于训练）  
# * sample_weight：权值的numpy array，用于在训练时调整损失函数（仅用于训练）。可以传递一个1D的与样本等长的向量用于对样本进行1对1的加权，或者在面对时序数据时，传递一个的形式为（samples，sequence_length）的矩阵来为每个时间步上的样本赋不同的权。这种情况下请确定在编译模型时添加了sample_weight_mode=’temporal’。Timestep-wise sample weighting (use of sample_weight_mode="temporal") is restricted to outputs that are at least 3D, i.e. that have a time dimension.
# * initial_epoch: 从该参数指定的epoch开始训练，在继续之前的训练时有用。  


def scheduler(epoch):
    """Step-decay policy: divide the learning rate by 10 every 10 epochs.

    Reads and writes the global `model`'s optimizer LR via the Keras
    backend; returns the (possibly updated) current learning rate.
    """
    if epoch != 0 and epoch % 10 == 0:
        current_lr = Kbackend.get_value(model.optimizer.lr)
        current_lr *= 0.1
        Kbackend.set_value(model.optimizer.lr, current_lr)
        print("lr changed to {}".format(current_lr))
    return Kbackend.get_value(model.optimizer.lr)

# keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10, verbose=0, mode='auto', epsilon=0.0001, cooldown=0, min_lr=0)
# Select the learning-rate decay policy applied during fit():
#   1 -> fixed schedule: LR divided by 10 every 10 epochs (scheduler above)
#   2 -> ReduceLROnPlateau watching val_loss
#   otherwise -> no decay; just the epoch-progress callback
if learning_rate_deacy_policy == 1:
    reduce_lr = [
        LearningRateScheduler(scheduler),
         # tf.keras.callbacks.ModelCheckpoint(model_file,
          #                      save_best_only=True)
    ]
elif learning_rate_deacy_policy == 2:
    reduce_lr = [
        # ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience = 5,verbose=2, mode='auto'),
        ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience = 10,verbose=2, mode='auto'),
    ]
        
else:
     # reduce_lr =  PrintDot()
    reduce_lr = [
        PrintDot(),
    ]
    
    
# 原文链接：https://blog.csdn.net/zzc15806/article/details/79711114



if model_stage == "train":
    if (flag == 1) or (flag == 2):
        print("flag:",flag)
        My_X = train_X
        My_Y = train_Y
        My_Test_X = test_X
        My_Test_Y = test_Y
        Train_W = train_weight
    elif flag == 4 or flag ==5:
        print("flag:",flag)
        My_X = trainX
        My_Y = []
        # # My_Y = [trainY[:,0],trainY[:,1],trainY[:,2]]
        # Train_W = [train_weight,train_weight,train_weight]
        My_Test_X = testX
        # My_Test_Y = [testY[:,0],testY[:,1],testY[:,2]]
        My_Test_Y = []
        Train_W = []
        # My_Test_Y = [testY[:,0],testY[:,1],testY[:,2],testY[:,3],testY[:,4]]
        tasks_num = len(element)
        for k in range(tasks_num):
            My_Y.append(trainY[:,k])
            My_Test_Y.append(testY[:,k])
            Train_W.append(train_weight)
        # My_Y = trainY
        # My_Test_Y = testY
    else:
        print("flag:",flag)
        My_X = trainX
        My_Y = trainY
        My_Test_X = testX
        My_Test_Y = testY
        Train_W = train_weight

# My_Y

if model_stage == "train":
        # history = model.fit(train_X, train_Y, batch_size=BATCH_SIZE,epochs=EPOCHS,
        #                validation_split = 0.1, verbose=1,callbacks=[PrintDot()])
    #     dataset = tf.data.Dataset.from_tensor_slices((train_X, train_Y))
    #     train_dataset  = dataset.shuffle(len(train_Y)).batch(1)
    #     t_dataset = tf.data.Dataset.from_tensor_slices((test_X,test_Y))
    #     val_dataset  = dataset.shuffle(len(test_Y)).batch(1)
    #     history = model.fit(train_dataset, epochs=EPOCHS,
    #                      validation_data = val_dataset, verbose=1,callbacks=[PrintDot()])

    if batch_size_strategy == True:
        if save_logs == False:
            history = model.fit(My_X, My_Y, batch_size = BATCH_SIZE,epochs=EPOCHS,validation_data = (My_Test_X,My_Test_Y),sample_weight = Train_W, verbose=2,callbacks=reduce_lr)
        else:
            history = model.fit(My_X, My_Y, batch_size = BATCH_SIZE,epochs=EPOCHS,validation_data = (My_Test_X,My_Test_Y),sample_weight = Train_W, verbose=2,callbacks=my_callbacks)
    else:
        if save_logs == False:
            history = model.fit(My_X, My_Y, epochs = EPOCHS,validation_data = (My_Test_X,My_Test_Y),sample_weight = Train_W, verbose=2,callbacks = reduce_lr)
        else:
            history = model.fit(My_X, My_Y, epochs = EPOCHS,validation_data = (My_Test_X,My_Test_Y),sample_weight = Train_W, verbose=2,callbacks = my_callbacks)
else:
    print("model_stage:", model_stage)





if model_stage == "train":
    json_config = model.to_json()
    with open(model_json, 'w') as json_file:
        json_file.write(json_config)

    model.save_weights(model_file)
    print("Model Save is Finished!")


# ## 模型测试

# model.evaluate输入数据(data)和金标准(label),然后将预测结果与金标准相比较,得到两者误差并输出.  
# model.predict输入数据(data),输出预测结果  
# * 是否需要真实标签(金标准)  
# model.evaluate需要,因为需要比较预测结果与真实标签的误差  
# model.predict不需要,只是单纯输出预测结果,全程不需要金标准的参与.  


def plot_history(history,flag_info,model_training_img_file_saving_path,model_training_img_name):
    """Plot train/validation error curves for a run and save them as a PNG.

    Multi-task runs (flag_info 4 or 5) plot the combined 'loss'/'val_loss'
    history; all other runs plot the per-epoch 'mse'/'val_mse' metrics.
    """
    hist = pd.DataFrame(history.history)
    hist['epoch'] = history.epoch

    plt.figure()
    plt.xlabel('Epoch')
    plt.ylabel('Mean Square Error [MSE]')
    if flag_info == 4 or flag_info == 5:
        train_key, val_key = 'loss', 'val_loss'
    else:
        train_key, val_key = 'mse', 'val_mse'
    plt.plot(hist['epoch'], hist[train_key], label='Train Error')
    plt.plot(hist['epoch'], hist[val_key], label='Val Error')
    plt.legend()

    # The file name encodes the run's hyperparameters so figures from
    # different configurations do not overwrite each other.
    plt.savefig(model_training_img_file_saving_path + model_training_img_name +"_" + element_name + '_'+ str(n_layers) + "_layers_" +  "_lr_" + str(
        learning_rate) + "h_dim" + str(hidden_dim) + "_epoch_" + str(EPOCHS) + '_loss.png', dpi=96,  bbox_inches='tight')



# history.params



if model_stage == "train":
    hist = pd.DataFrame(history.history)
    # print(hist)
    logger.info(hist)



if model_stage == "train":
    if flag != 4 and flag != 5:
        train_loss_csv_name = str(reference_name) + "_trainloss.csv"
    else:
        train_loss_csv_name = str(element) + "_trainloss.csv"
    hist.to_csv(csv_file_saving_path + train_loss_csv_name,mode='w',float_format='%.6f',index=None,header=True)



if model_stage == "train":
    hist = pd.DataFrame(history.history)
    hist['epoch'] = history.epoch
    print(hist.tail())
    plot_history(history,flag,model_training_img_file_saving_path,model_training_img_name)



if model_stage == "train":
    model.evaluate(My_Test_X, My_Test_Y, verbose=2)





# predictions = model.predict(My_Test_X)
# predictions.shape



if model_stage == "train":
    
    if (flag == 1) or (flag == 2):
        plt.figure(figsize=(6,40))
        predictions = model.predict(test_X)
#         np.testing.assert_allclose(predictions, test_Y, atol=1e-6)
        loss, mae, mse = model.evaluate(test_X, test_Y, verbose=2)
        sample_index = np.arange(len(test_Y))
        plt.plot(test_Y,sample_index, label="实测")
        plt.plot(predictions,sample_index, label="预测")
        plt.grid(True)#显示网格线"
        plt.xlabel( element_name)
        plt.ylabel("验证样本编号")
        plt.title(element_name + "在验证集上")
        plt.legend(loc='best')
    elif flag == 4 or flag == 5:
        plt.figure(figsize=(12,40))
        predictions = model.predict(My_Test_X)
#         np.testing.assert_allclose(predictions, test_Y, atol=1e-6)
        loss = model.evaluate(My_Test_X, My_Test_Y, verbose=2)[0]
        sample_index = np.arange(len(testY))
        for i in range(len(element)):
            fig_no = "13" + str(i+1)
            plt.subplot(int(fig_no))
            plt.plot(My_Test_Y[i],sample_index, label = element[i])
            plt.plot(predictions[i],sample_index, label="预测" + element[i])
            plt.grid(True)#显示网格线"
            plt.ylabel("验证样本编号")
            plt.title(element[i] + "在验证集上")
            plt.legend(loc='best')
    else:
        plt.figure(figsize=(6,40))
        predictions = model.predict(testX)
#         np.testing.assert_allclose(predictions, testY, atol=1e-6)
        loss, mae, mse = model.evaluate(testX, testY, verbose=2)
        sample_index = np.arange(len(testY))
        plt.plot(testY,sample_index, label="实测")
        plt.plot(predictions,sample_index, label="预测")
        plt.grid(True)#显示网格线"
        plt.ylabel("验证样本编号")
        plt.title(element_name + "在验证集上")
        plt.legend(loc='best')
    # plt.plot(test_Y_calc,sample_index, label="计算")
    
    
    
    plt.savefig(model_training_img_file_saving_path + model_training_img_name +"_" + element_name + '_'+ str(n_layers) + "_layers_" +  "_lr_" + str(
        learning_rate) + "h_dim" + str(hidden_dim) + "_epoch_" + str(EPOCHS)+ '_val.png', dpi=96,  bbox_inches='tight')
    # plt.show()
    # print("Testing set Mean Abs Error: {:5.5f} ".format(mae))
    # print("Testing set Mean Abs Error: {:5.5f} ".format(mse))
# 对于测试阶段目前暂不计算MSE，等反归一化后计算  
# else:
#    A_Y_predict = model.predict(testALL_A_X)


# ## 验证集预测值与真实标定值线性相关性分析


if model_stage == "train":
    GT = My_Test_Y
    if (flag == 1) or (flag == 2) or (flag == 3):
        rmse = np.sqrt(mean_squared_error(GT, predictions))  
        rmae = np.sqrt(mean_absolute_error(GT, predictions)) 
        ols = sm.OLS(GT, predictions).fit()
        logger.info('验证集预测值与真实标定值线性相关性分析')
        logger.info(ols.summary())
        logger.info('Test RMSE: %.4f' % rmse)
        logger.info('Test RMAE: %.4f' % rmae)
        rmse = "{:.9f}".format(rmse)
        rmae = "{:.9f}".format(rmae)
        summary_result = sm.iolib.summary.Summary.as_text(ols.summary())
        corr_index = summary_result.split("\nModel")[0].split("R-squared (uncentered):")[-1].strip()
    else: 
        rmse = np.zeros(len(element))
        rmae = np.zeros(len(element))
        ols = []
        corr_index = np.zeros(len(element))
        for i in range(len(element)):
            # i = element.index(item)
            rmse[i] = np.sqrt(mean_squared_error(GT[i], predictions[i]))
            rmae[i] = np.sqrt(mean_absolute_error(GT[i], predictions[i]))
            ols_ = sm.OLS(GT[i], predictions[i]).fit()
            logger.info(ols_.summary())
            logger.info("Task: " + str(i))
            summary_result = None
            summary_result = sm.iolib.summary.Summary.as_text(ols_.summary())
            corr_index[i] = summary_result.split("\nModel")[0].split("R-squared (uncentered):")[-1].strip()
            logger.info('验证集预测值与真实标定值线性相关性分析')
            
            ols.append(ols_)
            logger.info('Test RMSE: %.4f' % rmse[i])
            logger.info('Test RMAE: %.4f' % rmae[i])
            logger.info('R-squared: %.4f' % corr_index[i])
            rmae[i] = "{:.8f}".format(rmae[i])
            rmse[i] = "{:.8f}".format(rmse[i])
            corr_index[i] = "{:.8f}".format(corr_index[i])



if model_stage == "train":
    if flag == 4 or flag ==5:
        # float(corr_index)
#         print(ols[1].summary())
        print(corr_index)
    else:
#         summary_result = sm.iolib.summary.Summary.as_text(ols.summary())
#         corr_index = summary_result.split("\nModel")[0].split("R-squared (uncentered):")[-1].strip()
        float(corr_index)
        print(corr_index)



# if model_stage == "train":
# #     print(My_Test_Y[0])







if model_stage == "train":
    error = []
    if flag != 4 and flag!=5:
        plt.figure(figsize=(16,8))
        plt.subplot(121)
        plt.scatter(My_Test_Y, predictions)
        error = predictions - My_Test_Y
        plt.xlabel('True Values [MPG]')
        plt.ylabel('Predictions [MPG]')
        plt.axis('equal')
        plt.axis('square')
        plt.xlim([0,plt.xlim()[1]])
        plt.ylim([0,plt.ylim()[1]])
        plt.text(0.1 * plt.xlim()[1],0.9 * plt.ylim()[1], 'RMSE:' + str(rmse),fontdict=font)
        plt.text(0.1 * plt.xlim()[1],0.85 * plt.ylim()[1], 'RMAE:' + str(rmae),fontdict=font)
        plt.text(0.65 * plt.xlim()[1],0.1 * plt.ylim()[1], 'R:' + str(corr_index),fontdict=font)
        _ = plt.plot([-100, 100], [-100, 100]) 
        plt.subplot(122)
        plt.hist(error, bins = 25)
        plt.xlabel("Prediction Error [MPG]")
        _ = plt.ylabel("Count")
    else:
        plt.figure(figsize=(8 * tasks_num,16))
        for k in range(tasks_num):
            fig_label_no = str(2) + str(tasks_num) + str(k + 1)
            plt.subplot(int(fig_label_no))
            plt.title(element[k])
            plt.scatter(My_Test_Y[k], predictions[k])
            error = predictions[k] - My_Test_Y[k]
        # rmse = np.sqrt(mean_squared_error(testY, predictions))
            plt.xlabel('True Values [MPG]')
            plt.ylabel('Predictions [MPG]')
            plt.axis('equal')
            plt.axis('square')
#             plt.xlim([0,plt.xlim()[1]])
#             plt.ylim([0,plt.ylim()[1]])
            plt.text(0.1 * plt.xlim()[1],0.9 * plt.ylim()[1], 'RMSE:' + str(rmse[k]),fontdict=font)
            plt.text(0.1 * plt.xlim()[1],0.85 * plt.ylim()[1], 'RMAE:' + str(rmae[k]),fontdict=font)
            plt.text(0.1 * plt.xlim()[1],0.8 * plt.ylim()[1], 'R:' + str(corr_index[k]),fontdict=font)
            _ = plt.plot([-100, 100], [-100, 100])
   
        k = 0
        for k in range(tasks_num):
            fig_label_no_1 = str(2) + str(tasks_num) + str(tasks_num + k + 1)
            plt.subplot(int(fig_label_no_1))
            plt.title(element[k])
            plt.hist(error[k], bins = 25)
            plt.xlabel("Prediction Error [MPG]")
            _ = plt.ylabel("Count")

    plt.savefig(model_training_img_file_saving_path + model_training_img_name +"_" + element_name + '_'+ str(n_layers) + "_layers_" +  "_lr_" + str(
            learning_rate) + "h_dim" + str(hidden_dim) + "_epoch_" + str(EPOCHS)+ '_distr.png', dpi=96,  bbox_inches='tight')
#     plt.savefig(model_training_img_file_saving_path + model_training_img_name +"_" + element_name + '_'+ str(n_layers) + "_layers_" +  "_lr_" + str(
#         learning_rate) + "h_dim" + str(hidden_dim) + "_epoch_" + str(EPOCHS)+ '_val.png', dpi=96,  bbox_inches='tight')
    
    # plt.show()
    # print('Test RMSE: %.3f' % rmse)


# Optional audible notification that the run has finished: play a short
# clip for six seconds, then stop.
if paly_music == True:
    mixer.init()
    track_path = os.path.join("music/", "中国爱乐乐团 - 月亮之上(交响乐版).mp3")
    mixer.music.load(track_path)
    mixer.music.play()
    time.sleep(6)
    mixer.music.stop()
        # exit()






