# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'

# # 导入依赖包
# # numpy+mkl 版本为17.2  
# tensorboard，tensorflow 版本为2.1.0  
# pydot版本为1.4.1, graphviz 版本为0.13.2

# ## 导入第三方包
import os
import pandas as pd
import numpy as np
import tensorflow as tf
import time
import math
import seaborn as sns
import statsmodels.api as sm    # 0.11.1
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.colors as colors
# from tensorflow.keras import backend as Kbackend

# pydot_ng 用于绘制网络图
from tqdm import tqdm
import pydot_ng as pg  # 2.0.0
from mpl_toolkits.axes_grid1 import make_axes_locatable
from sklearn.model_selection import train_test_split
from sklearn import model_selection
from sklearn.preprocessing import scale
from sklearn.metrics import mean_absolute_error
from pandas import set_option
# from tensorflow import keras
# from tensorflow.keras import layers
# from tensorflow.keras.callbacks import TensorBoard
# from tensorflow.keras.callbacks import LearningRateScheduler
# from tensorflow.keras.callbacks import ReduceLROnPlateau
from pylab import *
from scipy import interpolate
from pygame import mixer


# calculate RMSE
from sklearn.metrics import mean_squared_error
# from itertools import cycle
from tqdm import tqdm
# sklearn.metrics.mean_squared_error(y_true, y_pred, sample_weight=None, multioutput='uniform_average', squared=True)
# https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html#sklearn.metrics.mean_squared_error


print(tf.__version__)

# ## Import project-local modules
import senutil as sen
# from rbflayer import RBFLayer, InitCentersRandom
import senmodels as sms

## The next two lines suppress library warnings.
import warnings
warnings.filterwarnings("ignore")

# Use the SimHei font so Chinese labels render correctly in matplotlib.
mpl.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['font.sans-serif']=['SimHei'] # render Chinese axis labels correctly
plt.rcParams['axes.unicode_minus']=False # render the minus sign correctly
# Console display options for pandas / numpy inspection output.
set_option("display.max_rows", 15)
set_option('display.width', 200)
np.set_printoptions(suppress=True, threshold=5000)


# Font properties used for ad-hoc matplotlib text annotations.
font={'family':'SimHei',
     'style':'italic',
    'weight' :'normal',
      'color':'red',
      'size':16
}
# -----------------------------------方法定义--------------------------------------------
# 测试数据预处理流程
def preprocessing_testing_logdata(test_data_read, input_vectors, flag, u_log_name):
    """Preprocess a test well's curve DataFrame into model-ready input.

    Args:
        test_data_read: DataFrame holding the raw well-log curves.
        input_vectors: names of the input curve columns.
        flag: model-family flag; 1 or 2 keeps flat samples, any other value
            builds sliding-window sequences of length `seq_length`.
        u_log_name: scaling reference forwarded to sen.zero_one_scaler.

    Returns:
        (testALL_A_X, depth_output, use_depth): the model input array, the
        matching depth vector (None when no depth column exists) and a bool
        saying whether depth information was found.

    Relies on module globals DEPTH_col_name, seq_length and the sen helpers.
    """
    # Bug fix: `use_depth` was never assigned (the assignment was commented
    # out), so the function raised NameError; set it in both branches.
    if DEPTH_col_name in test_data_read.columns.values:
        use_depth = True
        depth_log = np.array(test_data_read.loc[:, [DEPTH_col_name]])
    else:
        use_depth = False
        depth_log = None

    # Select the independent variables (work on a copy to avoid mutating
    # the caller's DataFrame through a view).
    inputX = test_data_read.loc[:, input_vectors].copy()
    # Resistivity curves span orders of magnitude: clamp to 0.01 then log10.
    electric_log = ["RD","RS","RLLD","RLLS"]
    for item in electric_log:
        if item in input_vectors:
            # Vectorized clamp replaces the original per-row Python loop —
            # same result, no chained-assignment warnings.
            inputX.loc[inputX[item] <= 0.01, item] = 0.01
            inputX[item] = np.log10(inputX[item])

    # Normalize every curve to [0, 1] and convert to an ndarray.
    AB_G = sen.zero_one_scaler(inputX, u_log_name)
    AB_X = np.array(AB_G)

    # Decide whether the model consumes flat samples or sequences.
    if (flag == 1) or (flag == 2):
        testALL_A_X = AB_X
        depth_output = depth_log if use_depth else None
    else:
        testALL_A_X = sen.build_All_A_dataset(AB_X, seq_length)
        depth_output = sen.build_addReslution_DEPTH(depth_log, seq_length) if use_depth else None

    return testALL_A_X, depth_output, use_depth


# 预测一口井的曲线
def inference_logs(testALL_A_X, model, flag, element_name, e_log_name, constant_value):
    """Predict one well's curve(s) and restore them to physical units.

    Args:
        testALL_A_X: preprocessed model input samples.
        model: trained Keras-style model exposing .predict().
        flag: model-family flag; 4 means multi-task (one output per target).
        element_name: target curve name (single-task case).
        e_log_name: scaling reference used by sen.revivification_scaler.
        constant_value: constant used to invert the permeability transform.

    Returns:
        De-normalized predictions; for flag == 4 the result is transposed
        to shape (samples, tasks).

    Relies on module globals: element, tasks_num, sen.
    """
    A_Y_predict = model.predict(testALL_A_X)
    # print(model.summary())
    # if flag != 4:
    #     print(min(A_Y_predict),max(A_Y_predict))
    # else:
    #     for i in range(0, tasks_num):
    #         print(min(A_Y_predict[i]),max(A_Y_predict[i]))
    A_Y_predict_final_GY = A_Y_predict.copy()

    # Post-process raw predictions: clamp out-of-range normalized values.
    if flag != 4:
        for j in range(0, len(A_Y_predict_final_GY)):
            # NOTE(review): assumes one scalar prediction per sample here —
            # the comparison would be ambiguous on a multi-column row; confirm.
            if A_Y_predict[j] < 0:
                A_Y_predict_final_GY[j] = 0
    else:
        for j in range(0, len(A_Y_predict_final_GY)):
            for k in range(0, len(A_Y_predict_final_GY[j])):
                if A_Y_predict[j][k] < 0:
                    # keep every task's value non-negative
                    A_Y_predict_final_GY[j][k] = 0
                if A_Y_predict[j][k] > 1:
                    # keep every task's value at most 1
                    A_Y_predict_final_GY[j][k] = 1

    # print("len(A_Y_predict_final_GY):",len(A_Y_predict_final_GY))#,A_Y_predict_final_GY
    # print("type(A_Y_predict_final_GY):",type(A_Y_predict_final_GY))

    ## Restore predictions to the physical range of each property curve.
    if flag != 4:
        pred_element_index = element.index(element_name)
    # print("pred_element_index:",pred_element_index,type(pred_element_index),e_log_name[pred_element_index])
        testALL_Y_predict = sen.revivification_scaler(A_Y_predict_final_GY,e_log_name,pred_element_index)
        if element_name == "渗透率":
            # "渗透率" (permeability) additionally undoes its log transform.
            Y_predict_final = sen.revivification_permeablity(testALL_Y_predict,constant_value)
            testALL_Y_predict = Y_predict_final
    else:
        A_Y_predict_final_GY = np.array(A_Y_predict_final_GY)
        # Multi-task predict() output is reshaped to (tasks, samples).
        A_Y_predict_final_GY.shape = (tasks_num,len(A_Y_predict_final_GY[0]))
        Y_predict_final = np.zeros_like(A_Y_predict_final_GY)
        for j in range(len(A_Y_predict_final_GY)):
            Y_predict_final[j] = sen.revivification_scaler(A_Y_predict_final_GY[j],e_log_name,j)
        Y_predict_final_use = Y_predict_final.copy()
        if "渗透率" in element:
            permeablity_id = element.index("渗透率")
            # print("渗透率逆变换")
            Y_predict_final_use[permeablity_id] = sen.revivification_permeablity(Y_predict_final[permeablity_id],constant_value)
        # Transpose so rows are samples; this suits plotting and file output.
        testALL_Y_predict = Y_predict_final_use.transpose()
        # print(A_Y_predict_final_GY.shape,"testALL_Y_predict.shape:",testALL_Y_predict.shape)

    return testALL_Y_predict



# 生成结果曲线
def generate_compare_logs(element_name, element, testALL_Y_predict_final, use_depth_info, DEPTH_col_name, DEPTH_AddReslution):
    """Wrap the prediction array in a DataFrame with labelled columns.

    Single-task models (module global flag != 4) get one column named
    '<element_name>预测'; multi-task models get one '<item>_预测' column per
    target in `element`. A depth column is prepended when depth info exists.
    """
    if flag != 4:
        column_names = [element_name + "预测"]
    else:
        column_names = [item + "_预测" for item in element]
    pred_data = pd.DataFrame(testALL_Y_predict_final, columns=column_names)
    if use_depth_info == True:
        depth_data = pd.DataFrame(DEPTH_AddReslution, columns=[DEPTH_col_name])
        frames = [depth_data, pred_data]
    else:
        frames = [pred_data]
    return pd.concat(frames, axis=1)

# 绘制测井曲线
def log_plot(DEPTH_col_name, logs):
    """Plot every curve in `logs` against depth (or sample index) and save.

    Args:
        DEPTH_col_name: name of the depth column; when present in `logs` its
            values index the y-axis, otherwise the row number is used.
        logs: DataFrame of curves, one subplot per column.

    Side effects: writes '<...>_PredictionAll.png' using the module globals
    model_testing_img_file_saving_path / model_testing_image_name.
    """
    if DEPTH_col_name in logs.columns:
        ztop = logs[DEPTH_col_name].min()
        zbot = logs[DEPTH_col_name].max()
        sample_index = logs[DEPTH_col_name]
    else:
        ztop = 0
        zbot = len(logs)
        sample_index = np.arange(zbot)

    total_fig_cols = len(logs.columns)
    f, ax = plt.subplots(nrows=1, ncols=total_fig_cols, figsize=(total_fig_cols * 2,15))
    # Robustness fix: with a single column plt.subplots returns a bare Axes
    # (not an array), which would break the ax[i] indexing below.
    ax = np.atleast_1d(ax)

    line_label = logs.columns.tolist()
    for i in range(total_fig_cols):
        ax[i].plot(logs.iloc[:,i], sample_index)
        ax[i].set_ylim(ztop,zbot)
        ax[i].invert_yaxis()
        ax[i].grid()
        ax[i].locator_params(axis='x', nbins=3)
        ax[i].set_xlabel(line_label[i])
        ax[i].set_xlim(logs.iloc[:,i].min() - 0.01,logs.iloc[:,i].max() * 1.02)
        ax[i].set_yticklabels([])

    # f.suptitle('Well: %s'%logs.iloc[0]['Well Name'], fontsize=14,y=0.94)
    f.suptitle("全部样本预测结果", fontsize=14,y=0.94)
    f.savefig(model_testing_img_file_saving_path + model_testing_image_name + '_PredictionAll.png', dpi=96,  bbox_inches='tight')
    plt.close()


def log_scatter_plot(logs, job_nums):
    """Scatter-plot each predicted curve against its reference curve.

    Args:
        logs: DataFrame whose first `job_nums` columns are the x-curves and
            the following `job_nums` columns the matching y-curves.
        job_nums: number of curve pairs (one subplot each).

    Side effects: writes '<...>_ScatterAll.png' using the module globals
    model_testing_img_file_saving_path / model_testing_image_name.
    """
    f, ax = plt.subplots(nrows=1, ncols = job_nums, figsize=(job_nums * 8,8))
    # Robustness fix: a single subplot comes back as a bare Axes object, so
    # normalize to an array before indexing. (The unused `point_colors`
    # local from the original was removed.)
    ax = np.atleast_1d(ax)

    line_label = logs.columns.tolist()
    for i in range(job_nums):
        x_curve = logs.iloc[:,i]
        y_curve = logs.iloc[:,i + job_nums]
        ax[i].scatter(x_curve, y_curve)
        ax[i].set_ylim(y_curve.min() * 0.8, y_curve.max() * 1.05)
        ax[i].invert_yaxis()
        ax[i].grid()
        ax[i].locator_params(axis='x', nbins=3)
        ax[i].set_xlabel(line_label[i])
        ax[i].set_ylabel(line_label[i + job_nums])
        ax[i].set_xlim(x_curve.min() * 0.8, x_curve.max() * 1.05)
        ax[i].set_yticklabels([])

    # f.suptitle('Well: %s'%logs.iloc[0]['Well Name'], fontsize=14,y=0.94)
    f.suptitle("散点图", fontsize=14, y=0.94)
    f.savefig(model_testing_img_file_saving_path + model_testing_image_name + '_ScatterAll.png', dpi=96,  bbox_inches='tight')
    plt.close()


# # 训练阶段的验证操作到此程序结束
# # 总评价模块

def testing_ground_exists(flag, use_high_R_data, TestDataPath, HighRDataPath, element, reference, element_name, reference_name):
    if use_high_R_data == True:
    # 测试阶段
        # print("有真实标定值！！！")
        High_R_ALL = pd.read_csv(HighRDataPath,engine='python',encoding='GBK')
        if flag != 4:
            High_R = High_R_ALL[element_name]
            all_Y_clac = High_R_ALL[reference_name]
            High_R = np.array(High_R)
            High_R.shape = (len(High_R),)
            all_Y_clac = np.array(all_Y_clac)
            all_Y_clac.shape = (len(all_Y_clac),)
        else:
            High_R = High_R_ALL[element]
            all_Y_clac = High_R_ALL[reference]
            High_R = np.array(High_R)
            all_Y_clac = np.array(all_Y_clac)
        add_flag = 2
    else:
        # print("无真实标定值！！！")
        High_R = None
        all_Y_clac = pd.read_csv(TestDataPath,engine='python',encoding='GBK')
        if flag != 4:
            all_Y_clac = all_Y_clac[reference_name]
            all_Y_clac = np.array(all_Y_clac)
            all_Y_clac.shape = (len(all_Y_clac),)
        else:
            all_Y_clac = all_Y_clac[reference]
            all_Y_clac = np.array(all_Y_clac)
            # all_Y_clac.shape = (len(all_Y_clac),)
        add_flag = 3

    return add_flag, High_R, all_Y_clac       


# ## 绘制折线图和散点图

# 绘制散点图
def plot_testing_scatter(add_flag, High_R_Label,Y_clac,testdata_Y_predict_final,element,element_name):
    """Draw prediction-vs-truth (and vs-calculation) scatter plots and save.

    Args:
        add_flag: 2 when calculated reference curves are also available.
        High_R_Label: measured (ground-truth) values.
        Y_clac: formula-calculated values.
        testdata_Y_predict_final: model predictions.
        element: target curve names (multi-task case, flag == 4).
        element_name: single target curve name (flag != 4).

    Relies on module globals flag, model_testing_img_file_saving_path and
    model_testing_image_name; writes '<...>_constract_scatterMap.png'.
    """
    if flag != 4:
        plt.figure(figsize=(15, 5))
        plt.subplot(131)
        plt.title("预测值和真实值比较")
        plt.scatter(testdata_Y_predict_final,High_R_Label,color="dodgerblue")
        plt.grid(linestyle = '--')
        plt.xlabel("预测" + element_name)
        plt.ylabel("真实值")
        if add_flag == 2:
            plt.subplot(132)
            plt.title("预测值和计算值比较")
            plt.scatter(testdata_Y_predict_final,Y_clac,color="orange")
            plt.grid(linestyle = '--')
            plt.xlabel("预测" + element_name)
            plt.ylabel("计算" + element_name)
            plt.subplot(133)
            plt.title("预测值和计算值比较")
            plt.scatter(Y_clac,High_R_Label,color="green")
            plt.grid(linestyle = '--')
            plt.xlabel("计算" + element_name)
            plt.ylabel("真实" + element_name)
    else:
        plt.figure(figsize=(15, 15))
        for i in range(len(element)):
            # Axis limits follow the measured-value range per target.
            value_max = max(High_R_Label[:,i]) * 1.05
            value_min = -0.2
            sub_no = str(33) + str(i+1)
            plt.subplot(sub_no)
            plt.title("预测值和真实值比较")
            plt.scatter(testdata_Y_predict_final[:,i],High_R_Label[:,i],color="dodgerblue")
            plt.grid(linestyle = '--')
            plt.xlabel("预测" + element[i])
            plt.ylabel("真实值")
            plt.xlim(value_min,value_max)
            plt.ylim(value_min,value_max)
        if add_flag ==2:
            for j in range(len(element)):
                value_max = max(High_R_Label[:,j]) * 1.05
                value_min = -0.2
                sub_no_1 = str(33) + str(3+j+1)
                plt.subplot(sub_no_1)
                plt.title("预测值和计算值比较")
                plt.scatter(testdata_Y_predict_final[:,j],Y_clac[:,j],color="orange")
                plt.grid(linestyle = '--')
                plt.xlabel("预测" + element[j])
                plt.ylabel("计算" + element[j])
                sub_no_2 = str(33) + str(6+j+1)
                plt.subplot(sub_no_2)
                plt.title("真实值和计算值比较")
                plt.scatter(Y_clac[:,j],High_R_Label[:,j],color="green")
                plt.grid(linestyle = '--')
                plt.xlabel("计算" + element[j])
                plt.ylabel("真实" + element[j])
    # Bug fix: these calls were nested inside the multi-task branch, so
    # single-task runs (flag != 4) saved nothing and leaked the open figure.
    # (The unused value_min/value_max locals of that branch were removed.)
    plt.tight_layout()
    plt.savefig(model_testing_img_file_saving_path + model_testing_image_name + '_constract_scatterMap.png', dpi=96,  bbox_inches='tight')
    # plt.show()
    plt.close()

# 绘制折线图，预测结果与真实标定趋势对比


# if use_high_R_data == True:
#     print(testdata_Y_predict_final.shape,all_Y_clac.shape,len(element),testdata_Y_predict_final[:,0])


def concate_testing_result_logs(flag, use_high_R_data, element, element_name, use_depth_info, DEPTH_AddReslution, testdata_Y_predict_final, High_R_data, input_Y_clac):
    """Assemble predicted / calculated (/ measured) curves into one DataFrame.

    Args:
        flag: model-family flag; 1/2 keep flat labels, 4 is multi-task.
        use_high_R_data: True when measured labels exist.
        element / element_name: multi- / single-task target names.
        use_depth_info: prepend a depth column when True.
        DEPTH_AddReslution: depth vector for the depth column.
        testdata_Y_predict_final: predictions; High_R_data: measurements;
        input_Y_clac: formula-calculated curves.

    Returns:
        (plot_data, Real_R_Label, Y_clac).

    Relies on module globals DEPTH_col_name, seq_length, sen.
    """
    # Align label resolution with the model output shape.
    if (flag == 1) or (flag == 2):
        Real_R_Label = High_R_data.copy() if use_high_R_data == True else None
        Y_clac = input_Y_clac.copy()
    else:
        Real_R_Label = sen.build_HighReslution_Label(High_R_data, seq_length) if use_high_R_data == True else None
        Y_clac = sen.build_HighReslution_Label(input_Y_clac, seq_length)

    if flag != 4:
        plot_data1 = pd.DataFrame(testdata_Y_predict_final,columns=["预测" + element_name])
        plot_data2 = pd.DataFrame(Y_clac,columns=["计算" + element_name])
        plot_data = pd.concat([plot_data1,plot_data2],axis=1)
        # NOTE(review): the original also built a "实测" frame here but never
        # concatenated it (dead code); it was removed without behavior change.
    else:
        plot_data = None
        for i in range(len(element)):
            plot_data1 = pd.DataFrame(testdata_Y_predict_final[:,i],columns=["预测" + element[i]])
            plot_data2 = pd.DataFrame(Y_clac[:,i],columns=["计算" + element[i]])
            # Bug fix: the original concatenated plot_data1/plot_data2 twice
            # per element (once unconditionally, once inside the if/else),
            # duplicating every column; each frame is now added exactly once.
            frames = [plot_data, plot_data1, plot_data2]
            if use_high_R_data == True:
                frames.append(pd.DataFrame(Real_R_Label[:,i],columns=["实测" + element[i]]))
            plot_data = pd.concat(frames,axis=1)
    if use_depth_info == True:
        depth_data = pd.DataFrame(DEPTH_AddReslution,columns=[DEPTH_col_name])
        plot_data = pd.concat([depth_data,plot_data],axis=1)
    return plot_data, Real_R_Label, Y_clac


# 绘制测试折线图
def plot_testing_lines(add_flag, use_depth_info, High_R_Label,Y_clac,testdata_Y_predict_final,DEPTH_AddReslution,element,element_name):
    """Plot predicted / measured / calculated curves against depth and save.

    Args:
        add_flag: 2 when real calibration values are available.
        use_depth_info: if False the function does nothing.
        High_R_Label: measured values; Y_clac: calculated values.
        testdata_Y_predict_final: model predictions.
        DEPTH_AddReslution: depth vector matching the curves.
        element / element_name: multi- / single-task target names.

    Relies on module globals flag, High_R, plot_line_style and the
    image-path globals; writes '<...>_trendency.png'.
    """
    if use_depth_info == True:
        plt.figure(figsize=(10, 12))
        # plt.title("testALL_Y_predict_final_element_log_revivification")
        plt.title("预测曲线展示")
        # No curve-length correction happens here: the prediction curve is
        # testALL_Y_predict_final == testdata_Y_predict_final.
        if (flag == 1) or (flag == 2):
            # print("2")
            if add_flag == 2:
                # NOTE(review): this reads the module-level High_R, shadowing
                # the High_R_Label parameter — looks unintended; confirm.
                High_R_Label = High_R
            plt.plot(testdata_Y_predict_final,DEPTH_AddReslution,color="dodgerblue", label="预测")
            plt.plot(High_R_Label,DEPTH_AddReslution,color="black", label="实测")
            plt.plot(Y_clac,DEPTH_AddReslution,color="yellow", label="计算")
            plt.xlabel(element_name)
        else:
            # print("3")
            if flag !=4:
                if add_flag == 2:
                    plt.plot(High_R_Label,DEPTH_AddReslution,color="black", label="实测")
                plt.plot(testdata_Y_predict_final,DEPTH_AddReslution,color="dodgerblue", label="预测")
                plt.plot(Y_clac,DEPTH_AddReslution,color="yellow", label="计算")
                plt.xlabel(element_name)
            else:
                # Multi-task: one line style per target curve.
                for i in range(len(element)):
                    if add_flag == 2:
                        plt.plot(High_R_Label[:,i],DEPTH_AddReslution, color="black", linestyle = plot_line_style[i + 1], label="实测" + element[i])
                    plt.plot(testdata_Y_predict_final[:,i],DEPTH_AddReslution,color="dodgerblue", linestyle = plot_line_style[i + 1],label="预测_" + element[i])
                    plt.plot(Y_clac[:,i],DEPTH_AddReslution,color="yellow",linestyle = plot_line_style[i + 1], label="计算"+ element[i])
                plt.xlabel("物性参数")
        plt.ylabel("深度(m)")
        plt.legend(loc='best')

    #     if model_stage == "train":
    #         plt.savefig(model_training_img_file_saving_path + model_training_img_name +"_" + element_name + '_'+ str(n_layers) + "_layers_" +  "_lr_" + str(
    #                 learning_rate) + "h_dim" + str(hidden_dim) + "_epoch_" + str(EPOCHS)+ '_trendency.png', dpi=96,  bbox_inches='tight')
    #     else:
        plt.savefig(model_testing_img_file_saving_path + model_testing_image_name + '_trendency.png', dpi=96,  bbox_inches='tight')
        # plt.show()
        plt.close()
    else:
        pass


def plot_hist_figure(model_stage, tasks_num_info, use_high_R_data, add_flag,  true_Y_label, Y_claculation, predict_final_data,element_name, element):
    """Plot per-task histograms of predicted / measured / calculated values.

    Args:
        model_stage: "train" or "test"; together with use_high_R_data it
            decides whether bin edges come from the labels or predictions.
        tasks_num_info: number of tasks (one subplot row per task).
        use_high_R_data: True when measured labels exist.
        add_flag: accepted but not read in this function.
        true_Y_label / Y_claculation / predict_final_data: curve arrays.
        element_name / element: single- / multi-task target names.

    Writes '<...>-hist.png' via the module image-path globals.
    """
    fig_cols = 3
    fig_rows = tasks_num_info * 1
    total_fig_nums = fig_rows * fig_cols  # NOTE(review): unused
    hist_f, hist_ax = plt.subplots(nrows=fig_rows, ncols=fig_cols, figsize=(4 * fig_cols, 4 * fig_rows))
    for i in range(tasks_num_info):
        # Pick the bin edges: from the true labels when available (or during
        # training), otherwise from the predictions themselves.
        if (use_high_R_data == True) or (model_stage == "train"):
            # a = np.histogram(true_Y_label[:,i])[0]
            # b = np.zeros(np.histogram(predict_final_data[:,i])[1].shape)
            # print("np.histogram(predict_final_data[:,i])[1].shape:", np.histogram(predict_final_data[:,i])[1].shape)
            if tasks_num_info == 1:
                b = np.histogram(true_Y_label)[1]
            else:
                b = np.histogram(true_Y_label[:,i])[1]
        else:
            # a = np.histogram(predict_final_data[:,i])[0]
            b = np.histogram(predict_final_data[:,i])[1]
        if tasks_num_info != 3:
            label_1 = "预测_" + element_name
            label_2 = "实测_" + element_name
            label_3 = "计算_" + element_name
        else:
            label_1 = "预测_" + element[i]
            label_2 = "实测_" + element[i]
            label_3 = "计算_" + element[i]
        # NOTE(review): bins = b[1:len(b)] drops the first histogram edge —
        # looks deliberate but worth confirming.
        if tasks_num_info == 1:
            # Single task: hist_ax is a 1-D array of three axes.
            j = 0
            hist_ax[j].hist(predict_final_data,color = "dodgerblue", label = label_1, bins = b[1:len(b)] )
            hist_ax[j].legend(loc="upper right")
            hist_ax[j].grid(linestyle = '--')
            j = j + 1
            hist_ax[j].hist(true_Y_label,color = "black",label = label_2, bins = b[1:len(b)])
            hist_ax[j].legend(loc="upper right")
            hist_ax[j].grid(linestyle = '--')
            j = j + 1
            hist_ax[j].hist(Y_claculation,color = "yellow", label = label_3, bins = b[1:len(b)] )
            hist_ax[j].legend(loc="upper right")
            hist_ax[j].grid(linestyle = '--')
        else:
            # Multi-task: hist_ax is 2-D, one row per task.
            j = 0
            hist_ax[i][j].hist(predict_final_data[:,i],color = "dodgerblue", label = label_1, bins = b[1:len(b)] )
            hist_ax[i][j].legend(loc="upper right")
            hist_ax[i][j].grid(linestyle = '--')
            j = j + 1
            hist_ax[i][j].hist(true_Y_label[:,i],color = "black",label = label_2, bins = b[1:len(b)])
            hist_ax[i][j].legend(loc="upper right")
            hist_ax[i][j].grid(linestyle = '--')
            j = j + 1
            hist_ax[i][j].hist(Y_claculation[:,i],color = "yellow", label = label_3, bins = b[1:len(b)] )
            hist_ax[i][j].legend(loc="upper right")
            hist_ax[i][j].grid(linestyle = '--')
    plt.legend(loc="upper right")
    plt.grid(linestyle = '--')

    # if model_stage == "train":
    #         plt.savefig(model_training_img_file_saving_path + model_training_img_name +"_" + element_name + '_'+ str(n_layers) + "_layers_" +  "_lr_" + str(
    #     learning_rate) + "h_dim" + str(hidden_dim) + "_epoch_" + str(EPOCHS)+ '-hist.png', dpi=96,  bbox_inches='tight')
    # else:
    plt.savefig(model_testing_img_file_saving_path + model_testing_image_name + '-hist.png', dpi=96,  bbox_inches='tight')
    # plt.show()
    plt.close()



# ## Linear-correlation analysis between predictions and real calibration
# ## values (also used for calculated-vs-real comparisons)

def _regression_metrics(y_true, y_pred):
    # RMSE, sqrt(MAE) ("RMAE", mirroring the original code) and uncentered
    # R-squared (scraped from the statsmodels OLS text summary) for one pair.
    rmse = np.sqrt(mean_squared_error(y_true, y_pred))
    rmae = np.sqrt(mean_absolute_error(y_true, y_pred))
    ols_fit = sm.OLS(y_true, y_pred).fit()
    summary_text = sm.iolib.summary.Summary.as_text(ols_fit.summary())
    corr = float(summary_text.split("\nModel")[0].split("R-squared (uncentered):")[-1].strip())
    return rmse, rmae, corr


def evaluate_testing_result(element, tasks_num, High_R_Label, testdata_Y_predict_final):
    """Score predictions against real calibration values.

    Args:
        element: list of target curve names.
        tasks_num: number of target curves in use.
        High_R_Label: measured (ground-truth) values.
        testdata_Y_predict_final: predicted (or calculated) values.

    Returns:
        (rmse_E_R, rmae_E_R, corr_E_R) arrays with one entry per task.

    Relies on module globals `flag` and `add_flag`.
    """
    # Bug fix: the original left the result arrays unassigned when add_flag
    # was neither 1 nor 2, raising UnboundLocalError on return; they are now
    # pre-initialized to zeros and filled only when labels exist.
    n_scores = 1 if flag != 4 else tasks_num
    rmse_E_R = np.zeros(n_scores)
    rmae_E_R = np.zeros(n_scores)
    corr_E_R = np.zeros(n_scores)
    if add_flag == 1 or add_flag == 2:
        if flag != 4:
            rmse_E_R[0], rmae_E_R[0], corr_E_R[0] = _regression_metrics(
                High_R_Label, testdata_Y_predict_final)
        else:
            for i in range(0, len(element)):
                rmse_E_R[i], rmae_E_R[i], corr_E_R[i] = _regression_metrics(
                    High_R_Label[:, i], testdata_Y_predict_final[:, i])
    return rmse_E_R, rmae_E_R, corr_E_R


# 查看结果分布
def plot_distrubution_figure(model_stage, flag_info, tasks_num_info, use_high_R_data, High_R_Label,Y_claculation,testdata_Y_predict_final,task_name,element_list):
    """Plot distribution histograms of predicted / measured / calculated
    curves, one subplot per task, and save the figure.

    NOTE(review): sns.distplot is deprecated in modern seaborn (histplot /
    displot replace it); kept here to preserve behavior.
    Relies on module globals element, font and the image-path globals;
    writes '<...>_add-R-hist.png'.
    """
    sns.set_style('white')
    sns.set(font=font['family'])
        # Chart style options include: "white", "dark", "whitegrid",
        # "darkgrid", "ticks"
    plt.figure(figsize=(12, tasks_num_info * 4))# create the canvas
    plt.title("数据分布分析")  # figure title
    for i in range(tasks_num_info):
        if flag_info != 4:
            fig_No = 100 + 1 * 10 + i * 1 + 1
            label_1 = "预测_" + task_name
            label_2 = "实测_" + task_name
            label_3 = "计算_" + task_name
        else:
            fig_No = tasks_num_info * 100 + 1 * 10 + i * 1 + 1
            # NOTE(review): reads the module-level `element`, not the
            # element_list parameter — confirm this is intended.
            label_1 = "预测_" + element[i]
            label_2 = "实测_" + element[i]
            label_3 = "计算_" + element[i]
        plt.subplot(fig_No)
        if tasks_num_info == 1:
            sns.distplot(testdata_Y_predict_final,  kde=False,label = label_1,hist=True,color='r')
            sns.distplot(High_R_Label,  label = label_2, kde=False,hist=True,color='b')
            sns.distplot(Y_claculation,  kde=False,label = label_3,hist=True,color='yellow')
        else:
            sns.distplot(testdata_Y_predict_final[i],  kde=False,label = label_1,hist=True,color='r')
            sns.distplot(High_R_Label[i],  label = label_2, kde=False,hist=True,color='b')
            sns.distplot(Y_claculation[i],  kde=False,label = label_3,hist=True,color='yellow')
        plt.legend(loc="upper right")
        plt.grid(linestyle = '--')     # add grid lines
    # if model_stage == "train":
    #     plt.savefig(model_training_img_file_saving_path + model_training_img_name +"_" + task_name + '_'+ str(n_layers) + "_layers_" +  "_lr_" + str(
    #         learning_rate) + "h_dim" + str(hidden_dim) + "_epoch_" + str(EPOCHS)+ '_add-R_hist.png', dpi=96,  bbox_inches='tight')
    # else:
    plt.savefig(model_testing_img_file_saving_path + model_testing_image_name + '_add-R-hist.png', dpi=96,  bbox_inches='tight')
    # plt.show()
    plt.close()


    
# # 曲线结果保存

# filename_A ： 'HP1_orginLog_6D_4075m-4280m_R_0.125.csv'
# well_name


# if use_depth_log == True:
#     print("begin_depth,end_depth:",begin_depth,end_depth)


# ## 查看保存的结果曲线
# if use_depth_log == False:
#     depth_log = all_sample_index
#     DEPTH_AddReslution = all_sample_index

# #结果输出至文件  

def output_prediction_result(well_name,element,element_name,pd_data,csv_file_saving_path):
    """Write the assembled result curves to a tab-separated GBK text file.

    The file name is '<well>_<target>_Pred_R_<timestamp>.txt'; the target is
    the single element name or, for multi-task models (module global
    flag == 4), the stringified element list. Timestamps come from
    sen.tid_maker().
    """
    target_tag = element_name if flag != 4 else str(element)
    result_csv_name = well_name + "_" + target_tag + '_Pred_R_'+ sen.tid_maker() +'.txt'
    pd_data.to_csv(csv_file_saving_path + result_csv_name,mode='w',encoding = 'GBK', float_format='%.4f',sep='\t',index=None,header=True)

# -----------------------------------------------------Main Function------------------------------------------------------------------------------------------------------

# tf.config.experimental.list_physical_devices()

# 使用GPU训练时候，打开下面注释
# Uncomment the lines below when training on a GPU.
os.environ['CUDA_VISIBLE_DEVICES'] = "0"  # default GPU id: "0"
# physical_devices = tf.config.experimental.list_physical_devices('GPU')
# physical_devices = tf.config.experimental.list_physical_devices('CPU')
# tf.config.experimental.set_memory_growth(physical_devices[0], True)

# print(physical_devices)


# Test a single file or many files:   "Single" | "Many"
# mode = "Single"
mode = "Many"

# When testing many files, whether every result is written to txt files.
WRITE_ALL_FILES_RESULT =  False #True

# # Preparation (data processing, result-saving locations)
# ## Dataset locations
# ### Training-set location
# data/train ; data/train2
TrainDataPath = 'data/train2/'

# filename_AB options:
# data/train:(1) 井数据1_20190603_孔隙度_训练.csv; (2) 井数据2_20190718_孔隙度_训练.csv;
# data/train2

# filename_AB = 'well_using_location_2_section_1_train.csv'  'well_using_location_2_section_1_train_cleaned.csv'
filename_AB = '井数据2_20190718_训练_section_1and2-train.csv'
# '井数据2_20190718_训练_section_1-train.csv'
TrainDataPath = os.path.join(TrainDataPath,filename_AB)
# print(TrainDataPath)

# ### Test-set location for the prediction stage

# TestDataPath = 'data/test2/'  # (2) without core data:
TestDataPath = 'data/CurveData/section_test_2nd/'   # (1) with core data:

# filename_A is the conventional-curve data matching the curves to predict
# (1) with core data:  166.csv;
# (2) without core data: 166.csv;

filename_A = '166.csv'
addR_well_name = filename_A.split(".")[0]


# ### Low-resolution dataset location
# use_low_R_data = False

# Spacing between low-resolution data points, default 1 | 0.5
step_length = 1

# ### Ground-truth dataset location
# Used to verify agreement between predictions and measurements; may not
# exist in practice.
###   If it does, choose "True".
use_high_R_data = False   # True | False

# High-resolution multiple of the low resolution, default value = 10
# (for 0.1m); 8 (for 0.125m)
resolution = 8

# HighRDataPath = 'data/testH/'  # data with core labels:
HighRDataPath = 'data/CurveData/section_test_2nd/'   # data with core labels:
# filename_C_H is the real measured well-log curve

filename_C_H = '166.csv'


if mode == "Single":
    # Single-file mode: point both paths directly at the chosen files.
    TestDataPath = os.path.join(TestDataPath,filename_A)
    HighRDataPath = os.path.join(HighRDataPath,filename_C_H)
    well_name = filename_A.split("_")[0]
    train_well_name = filename_AB.split(".")[0]
else:
    train_well_name = filename_AB.split(".")[0]
    # pass
    # Many-file mode: gather every file beneath the test / label directories.
    test_file_path_list = []
    real_label_file_path_list = []
    for parent, dirnames, filenames in os.walk(TestDataPath,  followlinks=True):
        # print(filenames)
        for filename in filenames:
            file_path = os.path.join(parent, filename)
            test_file_path_list.append(file_path)
    for parent, dirnames, filenames in os.walk(HighRDataPath,  followlinks=True):
            # print(filenames)
        for filename in filenames:
            file_path = os.path.join(parent, filename)
            real_label_file_path_list.append(file_path)
    print(test_file_path_list)


# # 模型定义
# ## 定义自变量
# 定义要输入的维度AC、CNL、DEN、GR、RD、RS等
# input_vectors = ["AC","CNL","DEN","GR","RD","RS"]
input_vectors = ["AC","CNL","DEN","GR","RLLD","RLLS"]
# input_vectors = ["AC","CNL","DEN","GR"]


# ## 定义因变量
# 定义要训练的参数模型
# 读取元素曲线训练数据，包括"Al","Ca","Fe","K","Mg","Na","Si"等元素曲线  
# element_name = |"Al"|"Ca"|"Fe"|"K"|"Mg"|"Na"|"Si"|   

# 定义要目标曲线PERM、POR、SW
element = ["孔隙度","饱和度","渗透率"]
# 公式计算的物性参数曲线
reference = ["POR","SW","PERM"]

line_colors = ['aqua', 'darkorange', 'cornflowerblue']   #python3里的无穷循环器

target_colors = [ '#0070C0','#00B050','#00B0F0','#75DAFF','#FFC000', '#632423','#FFFF00']


# '-'       solid line style
# '--'      dashed line style
# '-.'      dash-dot line style
# ':'       dotted line style
plot_line_style = ['solid' , 'dashed', 'dashdot', 'dotted']

element_name =  ""        # "孔隙度"|"饱和度"|"渗透率"
reference_name = ""       # "POR"|"SW"|"PERM"


# 样本权重
weight_coloum = "sample_weight"

# 深度列名称
DEPTH_col_name = "DEPTH"     # "DEPTH"  | 深度

# ## 设定是训练操作还是测试操作

# 模型有两种阶段：  "train"(训练) | "test"(测试)

# model_stage = "train"
model_stage = "test"


# ## Choose the model to use
# Available model_type values, with the preprocessing/inference branch flag
# (see sms.inference_flag below):
# (1)'RBF'(flag = 1);
# (2)'DNN'(flag = 2);
# (3)'LSTM','GRU','GRU2','DNN_2'(flag = 3);
# (4)'BLSTM', 'BGRU'(flag = 3),'MyWaveNet';'BiLSTM_Self_Atten'; 'BiGRU_Self_Atten'; currently the three best
# (5)'WaveNet','MyUNet'
# (6)'BiLSTM_Atten','BiLstm_WaveNet_Atten','C_BiLSTM_Atten'
# (7) (not yet finished)  'NAS','BiLSTM_Self_Atten';
# (8)'C_BiLSTM_Atten','WaveNet_BLSTM','BiGRU_Atten','X_BiGRU_Atten','MultiTask'


# model_type = "RBF"
# RBF, DNN, MyWaveNet(seq4), LSTM(seq4), GRU, BLSTM, BGRU, BiLSTM_Self_Atten

# model_type =   "BiGRU_Self_Atten"     # 'BiGRU_Atten' | "BiGRU_Self_Atten"
# upgraded version 1
# model_type =   "C_BiGRU"      #  C_BiGRU 'C_BiGRU_Atten'| "C_BiGRU_Self_Atten"
# upgraded version 2
# model_type =   "C_BiGRU_Skip"      #  C_BiGRU 'C_BiGRU_Atten'| "C_BiGRU_Self_Atten"
# upgraded version 3
# model_type =    "C_BiGRU_Self_Atten_2"      #'C_BiGRU_Self_Atten_3' 'C_BiGRU_Atten_2' | "C_BiGRU_Self_Atten_2"
# upgraded version 4
# model_type = "C_BiGRU_Self_Atten_4"
# upgraded version 0
# model_type =  "PSP_RBF"    # 'MultiTask'  | "MultiTask_Self_Atten"  MultiTask_Self_Atten  //PSP_RBF new method
# variant 1
# model_type = "MultiTask_MultiHeadAttenSeflAttnDNN"   # "MultiTask_MultiHeadAtten" | "MultiTask_MultiHeadAttenWithDNN" | "MultiTask_MultiHeadAttenSeflAttnDNN"
# variant 1
# model_type = "PSP_DNN"

model_type =  'C_LSTM_2'  # PSP_RBF_PARALLERL_SIMPLE | PSP_RBF_PARALLERL_SELFATTEN_MODEL
                                                  # PSP_PARALLERL_SELFATTEN_MODEL"
                            # C_LSTM_2   |    C_BiLSTM_2   |  C_BiLSTM_Self_Atten_2  
                            # PSP-LSTM(Parallel) |  PSP-BiLSTM(Parallel)  |  PSP-BiLSTM-SA(Parallel)   


# flag_1_list = ["RBF"]
# flag_2_list = ["DNN"]
# flag_3_list = ["LSTM","GRU","GRU2","DNN_2",
#                "BLSTM","BGRU","BiLstm_WaveNet_Atten","WaveNet_BLSTM",
#                "MyWaveNet","BiLSTM_Atten","BiLSTM_Self_Atten",
#                "BiGRU_Self_Atten"]
# flag_4_list = ["C_BiGRU","C_BiLSTM_Atten","C_BiGRU_Atten","C_BiGRU_Atten_2","X_BiGRU_Atten","MultiTask",
#                "C_BiGRU_Self_Atten","C_BiGRU_Self_Atten_2","MultiTask_Self_Atten",'MultiTask_Self_Atten',
#                'PSP_RBF',"MultiTask_MultiHeadAtten","MultiTask_MultiHeadAttenWithDNN",
#                "MultiTask_MultiHeadAttenSeflAttnDNN"]
# flag_5_list = ["PSP_RBF","PSP_DNN"]

# ## Model-related parameters

# Map the chosen model_type onto the numeric branch flag used throughout the
# pipeline (see the flag lists commented above).
flag = sms.inference_flag(model_type)


print("model_type:", model_type)
print(flag)
# flag == 4 marks the multi-task models, which predict one value per target in
# `element` (a list defined earlier in the file — not visible in this excerpt).
# Every other flag is a single-task model.
if flag !=4:
    tasks_num = 1
else:
    tasks_num = len(element)

print("tasks_num:",tasks_num)


# ## Initialize the network-structure hyper-parameters

# The structure parameters are consumed by the senmodels module.

# MAX_SAMPLE_NUM = 2000
# Input dimensionality = number of selected input log curves
# (`input_vectors` is defined earlier in the file).
data_dim = len(input_vectors)
seq_length = 20 #16  # sequence length, default 10; wells: TT1:4  J404:8
hidden_dim = 24  # hidden units, default 12, 24, 36; for bidirectional nets this is the per-direction size (half the total)
# Output dimensionality
output_dim = 3
n_layers = 5 # number of LSTM layers, default 4
dropout_rate = 0.2   # applied during training, default 0.2 (alt: 0.4)

# Changing the parameters below does not alter the network architecture.
learning_rate = 0.005   # default 0.01; preferred: 0.005; 0.008; also usable: 0.0008 0.0001 0.001
# batch_size = 100 Na:
BATCH_SIZE = 1000
# iterations = 300
EPOCHS =  30  # 20

# input_vectors_dim = len(input_vectors)

model_para = sms.MyModelParameter(data_dim,seq_length, hidden_dim, output_dim,learning_rate,dropout_rate,n_layers,BATCH_SIZE,EPOCHS)
print("model_n_layers:", model_para.n_layers)

# Whether training uses sample weights

# train_use_weight = False

# Whether to train the resolution-enhancement model, default: False | True

# train_add_R_model = False

# Play a music notification when the run finishes. True | False
# NOTE(review): "paly" is a typo for "play"; the name is kept because it is
# read again at the very end of the script.
paly_music = True # True


# # Whether to save training logs: default False | True. Saved logs can be
# # viewed live with TensorBoard, but the log files take disk space.
# save_logs = False
# # Where training logs are stored
# log_path = "logs"

# use_semi_seqlength = True
# Whether to draw the model architecture diagram. default = False; values: True | False
plot_modelnet = True


# ---------------------------------------------------------------------------
# Main per-target loop: for each (Chinese label, log mnemonic) pair — e.g.
# porosity/"POR" — load the corresponding trained model and run the full test
# pipeline: predict, plot, evaluate against ground truth, export CSV results.
# `element`, `reference`, `input_vectors`, `mode`, `train_well_name`,
# `use_high_R_data`, `TestDataPath`, `HighRDataPath`, `test_file_path_list`,
# `real_label_file_path_list`, `WRITE_ALL_FILES_RESULT` and the helper
# functions called below (preprocessing_testing_logdata, inference_logs,
# generate_compare_logs, log_plot, testing_ground_exists, ...) are all defined
# earlier in this file, outside this excerpt — TODO confirm their contracts.
# ---------------------------------------------------------------------------
for iter_task in list(zip(element,reference)):
    each_element = iter_task[0]
    each_reference = iter_task[1]
    element_name =  each_element        # Chinese target label: porosity | saturation | permeability
    reference_name = each_reference       # matching log mnemonic: "POR" | "SW" | "PERM"

    # ## Locate the saved model used in the test stage
    print('------------------------处理' + element_name + '中-----------------------------------')
    # ## Model-testing notes:
    # model.evaluate takes data plus ground-truth labels, compares predictions
    # against the labels, and reports the error.
    # model.predict takes data only and outputs raw predictions.
    # * Is ground truth required?
    # model.evaluate: yes — it needs the labels to compute the error.
    # model.predict: no — it only emits predictions.

    # history.params

    # predictions = model.predict(My_Test_X)
    # predictions.shape

    # # Post-processing of the predictions
    # ## Predictions below zero are clamped to zero (inside inference_logs — TODO confirm)


    # # Data loading and preprocessing

    # pandas read_csv defaults to the C parser engine, which can fail when the
    # file name contains Chinese characters; passing engine='python' avoids it.
    ### Branch according to the test mode


    Date = sen.tid_date()
    model_child_dir_name = train_well_name + '_Seq_'+ str(seq_length)+ "/"
    # custom_model_child_dir = "J404_Seq_8_WaveNet/"
    # Directory holding the saved models for this model type
    model_save_path = os.path.join("model/", 'element_' + model_type.lower() + "_train/")
    #model_save_path = os.path.join("model/", 'element_' + model_type.lower() + "_train/",model_child_dir_name)
    if os.path.exists(model_save_path):
        model_path = model_save_path
    else:
        # NOTE(review): os.mkdir assumes the parent "model/" directory already
        # exists; os.makedirs(model_save_path, exist_ok=True) would be safer.
        os.mkdir(model_save_path)
        model_path = model_save_path
    print(model_path)

    # NOTE(review): "_layers_" +  "_lr_" produces a double underscore
    # ("..._layers__lr_..."), which matches previously saved file names (see
    # the commented custom_model_* examples below), so it is kept.
    model_name = train_well_name + "_" + model_type.lower() + "_" + element_name + '_'+ str(n_layers) + "_layers_" +  "_lr_" + str(
        learning_rate) + "h_dim" + str(hidden_dim) + "_epoch_" + str(EPOCHS) + ".h5"
    model_file = model_path + model_name
    print("model_name:",model_name)
    print("model_file:",model_file)

    # JSON file holding the model architecture (the weights live in the .h5)
    json_name = train_well_name + "_" + model_type.lower() + "_" + element_name + '_'+ str(n_layers) + "_layers_" +  "_lr_" + str(
        learning_rate) + "h_dim" + str(hidden_dim) + "_epoch_" + str(EPOCHS) + ".json"
    model_json = model_path + json_name
    print(model_json)

    # ## Paths of the model to load for testing
    if model_stage == "test":
        pred_model_json = model_json
    # NOTE(review): pred_model_file is assigned unconditionally while
    # pred_model_json is only set in "test" mode — confirm this asymmetry is
    # intentional (model_stage is hard-coded to "test" earlier in the file).
    pred_model_file = model_file
    #     custom_model_json =  "AB-J404-3961m-4240m_blstm_"+ element_name + "_4_layers__lr_0.005h_dim36_epoch_40.json"
    #     custom_model_file =  "AB-J404-3961m-4240m_blstm_"+ element_name + "_4_layers__lr_0.005h_dim36_epoch_40.h5"

    #     pred_model_json = os.path.join(model_path,custom_model_json)
    #     pred_model_file = os.path.join(model_path,custom_model_file)

    # Abort if either model file is missing — the user must train first.
    if not (os.path.exists(pred_model_json) and os.path.exists(pred_model_file)):
        print("预测模型不存在，程序结束，请训练相应模型")
        exit()



    # NOTE(review): this first assignment is dead code — it is immediately
    # overwritten by the os.path.join(...) on the next line.
    csv_file_saving_path = "PaperHistory/noDepthExperimentHistory/csv_results_train_20230829/"
    csv_file_saving_path = os.path.join("csv_results_train_20230829/", model_type.lower() + "_test/")

    if not os.path.exists(csv_file_saving_path):
        os.mkdir(csv_file_saving_path)
    print(csv_file_saving_path)

    # ## Normalization ranges
    constant_value = 3
    # Value ranges of the input curves. RD and RS are log-transformed, hence
    # the lower bound of -1 mentioned in the original notes.
    # if element_name == "孔隙度" or "饱和度":
    #     AC = [140,350]
    #     CNL = [-0.8,80]
    #     DEN = [1,3]
    #     GR = [0,350]
    #     RD = [0,8]
    #     RS = [0,7]
    #     RLLD = [0,8]
    #     RLLS = [0,7]
    #     #SP = [-100,200]
    # else:
    #     AC = [140,300]
    #     CNL = [-0.8,70]
    #     DEN = [1,3]
    #     GR = [0,350]
    #     RD = [0,5]
    #     RS = [0,5]
    #     RLLD = [0,5]
    #     RLLS = [0,5]
        #SP = [-100,200]

    # AC = [0,380] # AC = [140,350]
    # CNL = [-0.8,90]  # CNL = [-0.8,80]
    # DEN = [1,3]
    # GR = [2,240]  # GR = [0,350]
    # RD = [0,8]
    # RS = [0,7]
    # RLLD = [0,8]
    # RLLS = [0,7]

    AC = [180,420] # AC = [140,350]
    CNL = [0,90]  # CNL = [-0.8,80]
    DEN = [1,3]
    GR = [0,220]  # GR = [0,350]
    RD = [0,5]
    RS = [0,5]
    RLLD = [0,5]
    RLLS = [0,5]

    # Petrophysical parameters computed from the log curves
    POR_CALC = [0,25]
    SW_CALC = [0,100]
    PERM_CALC = [-3 + constant_value, 1 + constant_value]

    # Petrophysical parameters measured by rock-physics lab experiments
    POR = [0,25]
    SW = [0,100]
    PERM = [-3+ constant_value, 1 + constant_value]

    # Dictionaries mapping curve/target names to their normalization ranges,
    # so the ranges can be selected to match the input dimensions.
    u_log = {"AC": AC, "CNL": CNL, "DEN": DEN, "GR": GR, "RD": RD, "RS": RS,"RLLD": RLLD, "RLLS": RLLS}
    # e_log = {"POR": POR, "SW": SW, "PERM": PERM,"孔隙度": POR, "饱和度": SW, "渗透率": PERM}
    e_log = {"孔隙度": POR, "饱和度": SW, "渗透率": PERM}
    e_CALC_log = {"POR": POR_CALC, "SW": SW_CALC, "PERM": PERM_CALC}

    u_log_name = []
    # The key input here is input_vectors: the list of selected input curves
    for i in input_vectors:
        u_log_name.append(u_log[i])
    # u_log_name

    e_log_name = []
    for i in element:
        e_log_name.append(e_log[i])
    # e_log_name

    e_calc_log_name = []
    for i in reference:
        e_calc_log_name.append(e_CALC_log[i])

    use_depth_log = True # True
    use_depth = True # True
    # # # Network instantiation
    # # ## Build the network or load a trained model
    if mode == "Single":
        print("输入模型为:", pred_model_file)

    # ---------------------------------------------------------------------------------------------------
    # ## Model loading / visualization
    model = tf.keras.Model()
    with open(pred_model_json, "r") as json_file_1:
        json_config_1 = json_file_1.read()
    # No branching needed here: the custom_objects map covers all model types.
    model = tf.keras.models.model_from_json(json_config_1,custom_objects={'GlorotUniform': tf.keras.initializers.GlorotUniform(),
                 'Zeros': tf.keras.initializers.Zeros(),
                 'RBFLayer': sms.RBFLayer,
                 'AttentionLayer': sms.AttentionLayer,
                 'My_Attention_layer': sms.My_Attention_layer})
    model.load_weights(pred_model_file)
    if mode == "Single":
        print(model.summary())

    #if  pg.find_graphviz() is not None:
    model_image_path = csv_file_saving_path
    if plot_modelnet == True:
        # Requires graphviz/pydot to be installed (see imports at file top)
        tf.keras.utils.plot_model(model,to_file= model_image_path + element_name + '_' + model_type + '_net.png',
                show_shapes=True,
                show_layer_names=True,
                rankdir='TB',
                expand_nested=False,
                dpi=96)

    # ## Timestamp for log/output naming
    # NOTE(review): relies on `datetime` being in scope via `from pylab import *`
    # at the top of the file — confirm.
    current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")




    if mode == "Single":
        # Single-well mode: one test file, paths configured earlier in the file
        A_read = pd.read_csv(TestDataPath,engine='python',encoding='GBK',sep=",")
        A_read  = A_read.dropna()
        for item in (input_vectors):
            A_read[item] = pd.to_numeric(A_read[item])
        print(well_name,train_well_name)
        model_testing_image_name =  model_type + "_" + well_name + "_" + element_name
        child_dir_name = well_name + model_type.lower() + '_Seq_'+ str(seq_length)+ "_" + Date + "/"
        testing_img_file_saving_path = 'model_testing_images/'
        model_testing_img_file_saving_path = os.path.join(testing_img_file_saving_path,child_dir_name)
        if not os.path.exists(model_testing_img_file_saving_path):
            os.mkdir(model_testing_img_file_saving_path)
        print("Testing data loading....")
        testALL_A_X, depth_log_use, use_depth_log = preprocessing_testing_logdata(A_read, input_vectors, flag, u_log_name)

        begin_depth = depth_log_use[0][0]
        end_depth = depth_log_use[-1][0]
        # Run the loaded model to produce predictions
        testALL_Y_predict_final = inference_logs(testALL_A_X, model, flag, element_name, e_log_name, constant_value)

        result_data_table = generate_compare_logs(element_name, element, testALL_Y_predict_final, use_depth_log, DEPTH_col_name, depth_log_use)
        if use_depth_log == True:
            log_plot(DEPTH_col_name, result_data_table)

        # add_flag encodes ground-truth availability: 2 = real labels exist,
        # 3 = labels missing but outputs still exported — TODO confirm codes.
        add_flag, High_R, all_Y_clac_use = testing_ground_exists(flag, use_high_R_data, TestDataPath, HighRDataPath, element, reference, element_name, reference_name)
        if add_flag == 2:
            # Real ground-truth test data are available
            plot_data, High_R_Label, Y_clac_use  = concate_testing_result_logs(flag, use_high_R_data, element, element_name, use_depth_log, depth_log_use, testALL_Y_predict_final, High_R, all_Y_clac_use)
            plot_testing_scatter(add_flag, High_R_Label,Y_clac_use,testALL_Y_predict_final,element,element_name)
            plot_testing_lines(add_flag, use_depth_log, High_R_Label,Y_clac_use,testALL_Y_predict_final,depth_log_use,element,element_name)
            # ## Linear correlation between predictions and true calibration values
            p_t_rmse, p_t_rmae, p_t_r = evaluate_testing_result(element, tasks_num, High_R_Label, testALL_Y_predict_final)
            print(p_t_rmse, p_t_rmae, p_t_r)
            # ## Correlation between conventionally computed values and true values
            c_t_rmse, c_t_rmae, c_t_r = evaluate_testing_result(element, tasks_num, High_R_Label, Y_clac_use)
            print(c_t_rmse, c_t_rmae, c_t_r)

            row_data = str(p_t_r) + ',' + str(p_t_rmse) + ',' + str(p_t_rmae) + ','
            row_data = row_data + '\n' + str(c_t_r) + ','+ str(c_t_rmse) + ',' + str(c_t_rmae) + ','
            if tasks_num == 1:
                evaluation_csv_name = well_name + "_" + element_name + "_evaluation_Pred_R_" + sen.tid_maker() +'.txt'
            else:
                evaluation_csv_name = well_name + "_" + str(element) + "_evaluation_Pred_R_" + sen.tid_maker() +'.txt'
            # NOTE(review): a `with open(...)` block would guarantee closing on error
            f = open(os.path.join(csv_file_saving_path,evaluation_csv_name), "w+")
            f.write("evaluation:" + '\n' + "R,rmse,rmae" + '\n' + row_data)
            f.flush()
            f.close()

            # Plot result histograms / distributions
            plot_hist_figure(model_stage, tasks_num, use_high_R_data, add_flag,  High_R_Label,Y_clac_use,testALL_Y_predict_final,element_name,element)
            plot_distrubution_figure(model_stage, flag, tasks_num, use_high_R_data, High_R_Label,Y_clac_use,testALL_Y_predict_final,element_name,element)
            output_prediction_result(well_name,element,element_name,plot_data,csv_file_saving_path)
        elif add_flag == 3:
            # No ground truth: only export the prediction curves
            plot_data, High_R_Label, Y_clac_use  = concate_testing_result_logs(flag, use_high_R_data, element, element_name, use_depth_log, depth_log_use, testALL_Y_predict_final, High_R, all_Y_clac_use)
            output_prediction_result(well_name,element,element_name,plot_data,csv_file_saving_path)
        else:
            pass
    else:
        # Multi-well mode: iterate over every test file, accumulate metrics and
        # report per-well plus averaged statistics.
        # pass
        all_p_t_rmse = 0.0
        all_p_t_rmae = 0.0
        all_p_t_r = 0.0
        all_c_t_rmse = 0.0
        all_c_t_rmae = 0.0
        all_c_t_r = 0.0
        # Lists collecting the per-well results
        all_p_t_rmse_list = []
        all_p_t_rmae_list = []
        all_p_t_r_list = []
        all_c_t_rmse_list = []
        all_c_t_rmae_list = []
        all_c_t_r_list = []

        well_nums = len(test_file_path_list)
        for t in tqdm(range(well_nums), desc='处理中', ncols=100):
            current_test_file = test_file_path_list[t]
            current_real_label = real_label_file_path_list[t]
            # print(current_test_file)
            well_name = os.path.basename(current_test_file).split(".csv")[0]
            # print('处理' + well_name + ".csv")
            A_read = None
            A_read = pd.read_csv(current_test_file,engine='python',encoding='GBK',sep=",")

            A_read  = A_read.dropna()
            for item in (input_vectors):
                A_read[item] = pd.to_numeric(A_read[item])
            # print(well_name,train_well_name)
            model_testing_image_name =  model_type + "_" + well_name + "_" + element_name
            child_dir_name = well_name + '-'+ model_type.lower() + '_Seq_'+ str(seq_length)+ "_" + Date + "/"
            testing_img_file_saving_path = 'model_testing_images/'
            model_testing_img_file_saving_path = os.path.join(testing_img_file_saving_path,child_dir_name)
            if not os.path.exists(model_testing_img_file_saving_path):
                os.mkdir(model_testing_img_file_saving_path)
            # print("Testing data loading....")
            testALL_A_X, depth_log_use, use_depth_log = preprocessing_testing_logdata(A_read, input_vectors, flag, u_log_name)

            begin_depth = depth_log_use[0][0]
            end_depth = depth_log_use[-1][0]
            # Run the loaded model to produce predictions
            testALL_Y_predict_final = inference_logs(testALL_A_X, model, flag, element_name, e_log_name, constant_value)

            result_data_table = generate_compare_logs(element_name, element, testALL_Y_predict_final, use_depth_log, DEPTH_col_name, depth_log_use)
            if use_depth_log == True:
                log_plot(DEPTH_col_name, result_data_table)

            add_flag, High_R, all_Y_clac_use = testing_ground_exists(flag, use_high_R_data, current_test_file, current_real_label, element, reference, element_name, reference_name)
            if add_flag == 2:
                # Real ground-truth test data are available
                plot_data, High_R_Label, Y_clac_use  = concate_testing_result_logs(flag, use_high_R_data, element, element_name, use_depth_log, depth_log_use, testALL_Y_predict_final, High_R, all_Y_clac_use)
                plot_testing_scatter(add_flag, High_R_Label,Y_clac_use,testALL_Y_predict_final,element,element_name)
                plot_testing_lines(add_flag, use_depth_log, High_R_Label,Y_clac_use,testALL_Y_predict_final,depth_log_use,element,element_name)
                # ## Linear correlation between predictions and true calibration values
                p_t_rmse, p_t_rmae, p_t_r = evaluate_testing_result(element, tasks_num, High_R_Label, testALL_Y_predict_final)
                # print(p_t_rmse, p_t_rmae, p_t_r)
                all_p_t_rmse = np.add(all_p_t_rmse, p_t_rmse)
                all_p_t_rmae = np.add(all_p_t_rmae,p_t_rmae)
                all_p_t_r = np.add(all_p_t_r, p_t_r)

                all_p_t_rmse_list.append(p_t_rmse)
                all_p_t_rmae_list.append(p_t_rmae)
                all_p_t_r_list.append(p_t_r)
                # ## Correlation between conventionally computed values and true values
                c_t_rmse, c_t_rmae, c_t_r = evaluate_testing_result(element, tasks_num, High_R_Label, Y_clac_use)
                # print(c_t_rmse, c_t_rmae, c_t_r)
                all_c_t_rmse = np.add(all_c_t_rmse , c_t_rmse)
                all_c_t_rmae = np.add(all_c_t_rmae , c_t_rmae)
                all_c_t_r = np.add(all_c_t_r , c_t_r)

                all_c_t_rmse_list.append(c_t_rmse)
                all_c_t_rmae_list.append(c_t_rmae)
                all_c_t_r_list.append(c_t_r)

                # When processing multiple files, optionally write one result
                # file per test well
                if WRITE_ALL_FILES_RESULT == True:
                    row_data = str(p_t_r) + ',' + str(p_t_rmse) + ',' + str(p_t_rmae)
                    row_data = row_data + '\n' + str(c_t_r) + ',' + str(c_t_rmse) + ',' + str(c_t_rmae)
                    if tasks_num == 1:
                        evaluation_csv_name = well_name + "_" + element_name + "_evaluation_Pred_R_" + sen.tid_maker() +'.csv'
                    else:
                        evaluation_csv_name = well_name + "_" + str(element) + "_evaluation_Pred_R_" + sen.tid_maker() +'.csv'
                    f = open(os.path.join(csv_file_saving_path,evaluation_csv_name), "w+")
                    f.write("evaluation:" + '\n' + "R,rmse,rmae" + '\n' + row_data)
                    f.flush()
                    f.close()

                # Plot result histograms / distributions
                plot_hist_figure(model_stage, tasks_num, use_high_R_data, add_flag,  High_R_Label,Y_clac_use,testALL_Y_predict_final,element_name,element)
                plot_distrubution_figure(model_stage, flag, tasks_num, use_high_R_data, High_R_Label,Y_clac_use,testALL_Y_predict_final,element_name,element)
                output_prediction_result(well_name,element,element_name,plot_data,csv_file_saving_path)
            elif add_flag == 3:
                plot_data, High_R_Label, Y_clac_use  = concate_testing_result_logs(flag, use_high_R_data, element, element_name, use_depth_log, depth_log_use, testALL_Y_predict_final, High_R, all_Y_clac_use)
                output_prediction_result(well_name,element,element_name,plot_data,csv_file_saving_path)
            else:
                pass


        # NOTE(review): averages divide by well_nums even when some wells took
        # the add_flag != 2 path and contributed nothing — confirm intended.
        average_p_t_rmse = np.divide(all_p_t_rmse, well_nums)
        average_p_t_rmae = np.divide(all_p_t_rmae , well_nums)
        average_p_t_r = np.divide(all_p_t_r , well_nums)

        average_c_t_rmse = np.divide(all_c_t_rmse , well_nums)
        average_c_t_rmae = np.divide(all_c_t_rmae , well_nums)
        average_c_t_r = np.divide(all_c_t_r , well_nums)
        print("-----------------Evalutaion------fianl--------------------")
        print("预测值与真实标定值线性相关性分析")
        # Flatten to 1-D so the values print/zip cleanly below
        average_p_t_rmse.shape = (len(average_p_t_rmse),)
        average_p_t_rmae.shape = (len(average_p_t_rmae),)
        average_p_t_r.shape = (len(average_p_t_r),)

        print('Average Test R:' , average_p_t_r)
        print('Average Test RMSE:', average_p_t_rmse)
        print('Average Test RMAE:', average_p_t_rmae)

        average_c_t_r.shape = (len(average_c_t_r),)
        average_c_t_rmse.shape = (len(average_c_t_rmse),)
        average_c_t_rmae.shape = (len(average_c_t_rmae),)
        print("计算值与真实标定值线性相关性分析")
        print('Average Calc R:', average_c_t_r)
        print('Average Calc RMSE:', average_c_t_rmse)
        print('Average Calc RMAE:', average_c_t_rmae)


        print("-----------------show all--------------------------")
        print("预测值与真实标定值线性相关性分析")
        all_p_t_r_array = np.array(all_p_t_r_list)
        all_p_t_r_array.shape = (len(all_p_t_r_array),)
        all_p_t_rmse_array = np.array(all_p_t_rmse_list)
        all_p_t_rmse_array.shape = (len(all_p_t_rmse_array),)
        all_p_t_rmae_array = np.array(all_p_t_rmae_list)
        all_p_t_rmae_array.shape = (len(all_p_t_rmae_array),)
        print('ALL Test R:' , all_p_t_r_array)
        print('ALL Test RMSE:', all_p_t_rmse_array)
        print('ALL Test RMAE:', all_p_t_rmae_array)

        all_c_t_r_array = np.array(all_c_t_r_list)
        all_c_t_r_array.shape = (len(all_c_t_r_array),)
        all_c_t_rmse_array = np.array(all_c_t_rmse_list)
        all_c_t_rmse_array.shape = (len(all_c_t_rmse_array),)
        all_c_t_rmae_array = np.array(all_c_t_rmae_list)
        all_c_t_rmae_array.shape = (len(all_c_t_rmae_array),)
        print("计算值与真实标定值线性相关性分析")
        print('ALL Calc R:', all_c_t_r_array)
        print('ALL Calc RMSE:', all_c_t_rmse_array)
        print('ALL Calc RMAE:', all_c_t_rmae_array)

        # Build the summary CSV: averaged metrics first, then one row per well.
        row_data = "Evaluation:" + '\n' + "method, Average_R, Average_rmse,Average_MSE, Average_rmae, Average_MAE " + '\n'
        row_data = row_data + 'Ours,' + str(average_p_t_r[0]) + ',' + str(average_p_t_rmse[0]) + ',' + str(pow(average_p_t_rmse[0],2)) + ',' + str(average_p_t_rmae[0]) + ',' + str(pow(average_p_t_rmae[0],2))
        row_data = row_data + '\n' +  'Current,' + str(average_c_t_r[0]) +  ',' + str(average_c_t_rmse[0]) + ','  + str(pow(average_c_t_rmse[0],2)) + ',' + str(average_c_t_rmae[0]) + ',' + str(pow(average_c_t_rmae[0],2))

        # NOTE(review): the header string is missing commas between
        # 'Calc RMAE' and 'Calc MAE' — the columns will merge in the CSV.
        row_data_1 = 'Test Well,' + 'Test R,' + 'Test RMSE,' + 'Test MSE,' + 'Test RMAE,' + 'Test MAE,' +'Calc R,' + 'Calc RMSE,' + 'Calc MSE,'+ 'Calc RMAE' + 'Calc MAE' + '\n'
        test_file_path_array = np.array(test_file_path_list)
        test_file_path_array.shape = (len(test_file_path_array),)
        for each_line  in zip(test_file_path_array,all_p_t_r_array,all_p_t_rmse_array,all_p_t_rmae_array,all_c_t_r_array,all_c_t_rmse_array,all_c_t_rmae_array):
            each_well_name = each_line[0]
            each_P_R = each_line[1]
            each_P_RMSE = each_line[2]
            each_P_RMAE = each_line[3]
            each_C_R = each_line[4]
            each_C_RMSE = each_line[5]
            each_C_RMAE = each_line[6]
            row_data_1 = row_data_1 + each_well_name + ',' + str(each_P_R) + ',' + str(each_P_RMSE) + ',' + str(pow(each_P_RMSE,2)) + ',' + str(each_P_RMAE) + ',' + str(pow(each_P_RMAE,2)) + ',' + str(each_C_R) + ','  + str(each_C_RMSE)+ ','  + str(pow(each_C_RMSE,2))+ ',' + str(each_C_RMAE) + ',' + str(pow(each_C_RMAE,2))  + '\n'

        if tasks_num == 1:
            evaluation_csv_name = str(model_type.lower()) + "_"+ element_name + "_all-test_wells_evaluation_" + sen.tid_maker() +'.csv'
        else:
            evaluation_csv_name = str(model_type.lower()) + str(element) + "_all-test_wells_evaluation_" + sen.tid_maker() +'.csv'
        f = open(os.path.join(csv_file_saving_path,evaluation_csv_name), "w+")
        f.write(row_data + '\n')
        f.write('\n' + row_data_1)
        f.flush()
        f.close()



# ----- End of run: play a short audio clip as a completion notification -----
# (paly_music is the play/skip switch defined earlier in the file; its
# misspelling is preserved there.)
if paly_music == True:
    notification_dir = "music/"
    notification_track = "中国爱乐乐团 - 月亮之上(交响乐版).mp3"  # alt: "MISIA - 星のように.mp3"
    notification_path = os.path.join(notification_dir, notification_track)
    mixer.init()
    mixer.music.load(notification_path)
    mixer.music.play()
    time.sleep(6)  # let the clip play for six seconds before stopping
    mixer.music.stop()


# #######################################################################