import os
import numpy as np
import torch
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn import model_selection
from config_files.FACIES_BaseModel_Configs import Config

# Check whether the facies class labels start at 0 or 1.
def check_facieslabel_begin(labelY_array):
    """Return 1 if the smallest label in *labelY_array* is 1, or 0 if it is 0.

    The downstream normalisation only handles 0- or 1-based class labels,
    so any other minimum is an error.

    Raises:
        ValueError: if the minimum label is neither 0 nor 1.  (The original
            printed a message and called ``exit()``; raising is catchable
            and still aborts a plain script with a non-zero status.)
    """
    lowest = min(labelY_array)
    if lowest == 1:
        return 1
    if lowest == 0:
        return 0
    raise ValueError(f"检查类别标签 (check the class labels): unexpected minimum label {lowest}")

# Window two parallel series into (sequence, next-step-label) training pairs.
def build_All_Train_dataset(time_seriesX, time_seriesY, seq_length):
    """Pair each window ``X[i:i+seq_length]`` with the label ``Y[i+seq_length]``.

    Returns two numpy arrays: the stacked input windows and the label that
    immediately follows each window.
    """
    n_windows = len(time_seriesX) - seq_length
    windows = [time_seriesX[start:start + seq_length] for start in range(n_windows)]
    next_labels = [time_seriesY[start + seq_length] for start in range(n_windows)]
    return np.array(windows), np.array(next_labels)


# Build the (unlabeled) target series aligned with the training windows.
def build_All_Y_dataset(time_series, seq_length):
    """Return ``time_series[seq_length:]`` as a numpy array.

    NOTE(review): despite the name, this does NOT emit sliding windows — it
    collects the single element at each window's end position.  The
    commented-out slice in the original suggests windows may once have been
    intended; confirm before reusing this helper.
    """
    tail = [time_series[idx] for idx in range(seq_length, len(time_series))]
    return np.array(tail)


def inference_flag(model_type):
    """Map a model-type name to its numeric inference flag (1-5).

    Groups are checked in order, so a name appearing in more than one group
    takes the first matching flag (e.g. ``"PSP_RBF"`` -> 4, not 5).
    Unknown names default to 3.
    """
    groups = [
        (1, ["RBF"]),
        (2, ["DNN"]),
        (3, ["LSTM", "GRU", "GRU2", "DNN_2",
             "BLSTM", "BGRU", "BiLstm_WaveNet_Atten", "WaveNet_BLSTM",
             "MyWaveNet", "BiLSTM_Atten", "BiLSTM_Self_Atten",
             "BiGRU_Self_Atten"]),
        (4, ["C_BiGRU", "C_BiLSTM_Atten", "C_BiGRU_Atten", "C_BiGRU_Atten_2",
             "X_BiGRU_Atten", "MultiTask",
             "C_BiGRU_Self_Atten", "C_BiGRU_Self_Atten_2", "MultiTask_Self_Atten",
             "PSP_RBF", "MultiTask_MultiHeadAtten", "MultiTask_MultiHeadAttenSeflAttnDNN"]),
        (5, ["PSP_RBF", "PSP_DNN", "PSP_DNN_PARALLERL_SIMPLE"]),
    ]
    for flag_value, names in groups:
        if model_type in names:
            return flag_value
    return 3


# -----------------------------------------------------------------------------------------
# Model selection: set model_type to exactly one name.  The trailing comment on
# each disabled line lists the other valid names for that model family.

# model_type =   "BiGRU_Self_Atten"     # 'BiGRU_Atten' | "BiGRU_Self_Atten"
# Upgraded version 1
# model_type =   "C_BiGRU"      # 'C_BiGRU' | C_BiGRU_Atten'| "C_BiGRU_Self_Atten"
# Upgraded version 2
# model_type =    "C_BiGRU_Self_Atten_2"      # 'C_BiGRU_Atten_2' | "C_BiGRU_Self_Atten_2"
# Upgraded version 3
# model_type =  "PSP_DNN"    # 'MultiTask'  | "MultiTask_Self_Atten"  MultiTask_Self_Atten  //PSP_RBF new method   "PSP_DNN"
# Variant 1
# model_type = "MultiTask_MultiHeadAttenSeflAttnDNN"   # "MultiTask_MultiHeadAtten" | "MultiTask_MultiHeadAttenWithDNN" | "MultiTask_MultiHeadAttenSeflAttnDNN"

# Active model; the flag decides below whether data is windowed into sequences.
model_type ="BiGRU_Self_Atten"


flag = inference_flag(model_type)

print(os.getcwd())
# 'Facies Dataset'
# os.path.join keeps the script portable — the original hard-coded
# Windows-style backslash paths, which break on POSIX systems.
data_dir = os.path.join('..', '..', 'data', 'OrginData')
output_dir = os.path.join('..', '..', 'data', 'FACIES')

if not os.path.exists(data_dir):
    # Warn only; the later pd.read_csv will fail loudly if the data is
    # genuinely missing.
    print(data_dir,"不存在")

# makedirs(exist_ok=True) also creates missing parent directories and avoids
# the check-then-create race of the original exists()/os.mkdir pair.
os.makedirs(output_dir, exist_ok=True)

# Training data path (unused leftover; kept for reference)
# TrainDataPath = 'train'
# Test data path (unused leftover; kept for reference)
# TestDataPath = 'test'
# Name of the CSV column holding the facies class labels
facies_labels_col = "Facies"
# Name of the depth column (not referenced below; kept for downstream use)
DEPTH_col_name = "DEPTH"

# subject_data = np.loadtxt(f'{data_dir}/train/subject_train.txt')
# Samples
# NOTE: the triple-quoted blocks below are disabled leftovers from a
# UCI-HAR-style inertial-signal loader; kept verbatim for reference.
'''train_acc_x = np.loadtxt(f'{data_dir}/train/Inertial Signals/body_acc_x_train.txt')
train_acc_y = np.loadtxt(f'{data_dir}/train/Inertial Signals/body_acc_y_train.txt')
train_acc_z = np.loadtxt(f'{data_dir}/train/Inertial Signals/body_acc_z_train.txt')
train_gyro_x = np.loadtxt(f'{data_dir}/train/Inertial Signals/body_gyro_x_train.txt')
train_gyro_y = np.loadtxt(f'{data_dir}/train/Inertial Signals/body_gyro_y_train.txt')
train_gyro_z = np.loadtxt(f'{data_dir}/train/Inertial Signals/body_gyro_z_train.txt')
train_tot_acc_x = np.loadtxt(f'{data_dir}/train/Inertial Signals/total_acc_x_train.txt')
train_tot_acc_y = np.loadtxt(f'{data_dir}/train/Inertial Signals/total_acc_y_train.txt')
train_tot_acc_z = np.loadtxt(f'{data_dir}/train/Inertial Signals/total_acc_z_train.txt')
test_acc_x = np.loadtxt(f'{data_dir}/test/Inertial Signals/body_acc_x_test.txt')
test_acc_y = np.loadtxt(f'{data_dir}/test/Inertial Signals/body_acc_y_test.txt')
test_acc_z = np.loadtxt(f'{data_dir}/test/Inertial Signals/body_acc_z_test.txt')
test_gyro_x = np.loadtxt(f'{data_dir}/test/Inertial Signals/body_gyro_x_test.txt')
test_gyro_y = np.loadtxt(f'{data_dir}/test/Inertial Signals/body_gyro_y_test.txt')
test_gyro_z = np.loadtxt(f'{data_dir}/test/Inertial Signals/body_gyro_z_test.txt')
test_tot_acc_x = np.loadtxt(f'{data_dir}/test/Inertial Signals/total_acc_x_test.txt')
test_tot_acc_y = np.loadtxt(f'{data_dir}/test/Inertial Signals/total_acc_y_test.txt')
test_tot_acc_z = np.loadtxt(f'{data_dir}/test/Inertial Signals/total_acc_z_test.txt')'''


# Training CSV for one well.  GBK encoding + the python engine are used
# because the file name (and presumably its contents) contain Chinese text
# saved from a GBK-encoded source — confirm if the data files change.
TrainDataPath = f'{data_dir}/GY1地质岩相-0512_facies_vectors_0.1.csv'
# TrainDataPath = f'{data_dir}/YX-58井_facies_vectors_0.02.csv'
AB_use = pd.read_csv(TrainDataPath,engine='python',encoding='GBK')


# Stacking channels together data
'''train_data = np.stack((train_acc_x, train_acc_y, train_acc_z,
                       train_gyro_x, train_gyro_y, train_gyro_z,
                       train_tot_acc_x, train_tot_acc_y, train_tot_acc_z), axis=2)'''

# Feature columns — these look like standard well-log curve mnemonics
# (DT, CNL, DEN, GR, RD, RS); MSFL was dropped from the earlier list.
# input_vectors = ["DT","CNL","DEN","MSFL","GR","RD","RS"]
input_vectors = ["DT","CNL","DEN", "GR","RD","RS"]

# Drop rows with any missing value, then keep only the feature columns.
inputTrainX = AB_use.dropna().reset_index(drop = True).loc[:,input_vectors]
trainX = np.array(inputTrainX)

'''X_test = np.stack((test_acc_x, test_acc_y, test_acc_z,
                   test_gyro_x, test_gyro_y, test_gyro_z,
                   test_tot_acc_x, test_tot_acc_y, test_tot_acc_z), axis=2)'''

# Test CSV: a different well held out for testing; alternatives kept commented.
TestDataPath = f'{data_dir}/YX58地质岩相-0512_facies_vectors_0.01.csv'
# TestDataPath = f'{data_dir}/YX58井_facies_vectors_0.02.csv'
# TestDataPath = f'{data_dir}/Z2911井_facies_vectors_0.02.csv'
A_read = pd.read_csv(TestDataPath,engine='python',encoding='GBK')

# Same dropna + column selection as the training features.
inputTestX = A_read.dropna().reset_index(drop = True).loc[:,input_vectors]
testX = np.array(inputTestX)


# labels
'''train_labels = np.loadtxt(f'{data_dir}/train/y_train.txt')'''
# Facies labels for training, taken after the SAME dropna() as the feature
# rows so X and Y stay aligned row-for-row.
inputTrainY = AB_use.dropna().reset_index(drop = True).loc[:, facies_labels_col]
trainY = np.array(inputTrainY)

print("train_data.shape",trainX.shape)
print("train_labels.shape",trainY.shape)

'''y_test = np.loadtxt(f'{data_dir}/test/y_test.txt')'''

inputTestY = A_read.dropna().reset_index(drop = True).loc[:, facies_labels_col]
testY = np.array(inputTestY)

# Check whether the class labels are 0- or 1-based, and shift to 0-based.
label_status = check_facieslabel_begin(labelY_array=trainY)
if label_status == 1:
    print("------------类别标签从1开始---------------")  # labels start at 1
    # NOTE(review): testY is shifted by its OWN minimum rather than by
    # trainY's; if the test well happens to lack the lowest class, its
    # labels end up misaligned with training — confirm this is intended.
    trainY -= np.min(trainY)
    testY -= np.min(testY)
else:
    print("------------类别标签从0开始---------------")  # labels start at 0
    # NOTE(review): only this branch casts to uint8; the 1-based branch
    # keeps the original integer dtype — confirm downstream code accepts both.
    trainY = np.array(trainY, dtype=np.uint8)
    testY = np.array(testY, dtype=np.uint8)




# Window length for sequence models, taken from the shared model config.
config = Config()
seq_length = config.data_seq_len


# Flags 1-2 are per-sample models; everything else consumes sequences and
# therefore needs the data windowed first.
if (flag == 1) or (flag == 2):
    print("------------不需要序列化------------")  # "no serialization needed"
    # Plain random 80/20 train/validation split on individual samples.
    X_train, X_val, y_train, y_val = train_test_split(trainX, trainY, test_size=0.2, random_state=42)
    X_test = testX
    y_test = testY
else:
    print("------------序列化------------")  # "serializing"
    # Each sample becomes seq_length consecutive depth steps, labelled by
    # the step that immediately follows the window.
    train_dataX, train_dataY = build_All_Train_dataset(trainX, trainY, seq_length)
    print("train_dataX.shape", train_dataX.shape)
    print("train_dataY.shape", train_dataY.shape)
    # NOTE(review): n_splits=10, but every loop iteration overwrites
    # X_train/X_val/y_train/y_val, so only the 10th split survives —
    # n_splits=1 would give the same result with less work.  Confirm
    # before changing, as random_state interacts with the split sequence.
    sss = model_selection.ShuffleSplit(n_splits=10, test_size=0.2, random_state=0)
    for train_index, val_index in sss.split(train_dataX):
        # print("TRAIN:", train_index, "TEST:", test_index)
        X_train, X_val = train_dataX[train_index], train_dataX[val_index]
        y_train, y_val = train_dataY[train_index], train_dataY[val_index]
    # Test stage WITH labels: window the held-out well the same way.
    X_test, y_test = build_All_Train_dataset(testX, testY, seq_length)
    # Test stage without labels (disabled alternative):
    # testALL_A_X = build_All_Y_dataset(testX, seq_length)


# Persist the three splits in the {"samples", "labels"} tensor-dict layout
# that the downstream training code loads via torch.load.
for file_name, sample_arr, label_arr in (
        ("train.pt", X_train, y_train),
        ("val.pt", X_val, y_val),
        ("test.pt", X_test, y_test)):
    payload = {
        "samples": torch.from_numpy(sample_arr),
        "labels": torch.from_numpy(label_arr),
    }
    torch.save(payload, os.path.join(output_dir, file_name))


print("X_train.shape",X_train.shape)
print("y_train.shape",y_train.shape)
print("X_test.shape",X_test.shape)
print("y_test.shape",y_test.shape)