"""
  Train the Transformer model for HDV (human-driven vehicle) cooperative lane changing.
"""

# !处理路径导入问题（添加绝对路径）！！！
import sys
import os
CODE_INTERNAL_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) # 生成Code文件夹内部对应的绝对路径
sys.path.append(CODE_INTERNAL_PATH)

# 导入外部包
import numpy as np
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout, Reshape
from tensorflow import keras
from tensorflow.keras import layers

# 导入内部包
from utils.read_data import read_extract_lanechange_data
from utils.lstm import create_lstm_dataset, normalization
from utils.transformer import PositionalEncoding

EPOCHS = 50 # number of training epochs
BATCH_SIZE = 64 # number of samples per training batch

# NGSIM extracted lane-change trajectory files.
# Comment format: raw shape / shape after filtering invalid data (original experiment size in parentheses).
FILE_PATH_I80_1_to = "../../Data/Ngsim数据集/I80数据集/3. 提取数据/2. 换道数据/trajectories-0400-0415_lanechange.txt" # 300 80 21 / after filtering invalid data 91 80 24 (original experiment size 260 297 14)
FILE_PATH_I80_2_to = "../../Data/Ngsim数据集/I80数据集/3. 提取数据/2. 换道数据/trajectories-0500-0515_lanechange.txt" # 251 80 21 / after filtering invalid data 71 80 24 (original experiment size 212 297 14)
FILE_PATH_I80_3_to = "../../Data/Ngsim数据集/I80数据集/3. 提取数据/2. 换道数据/trajectories-0515-0530_lanechange.txt" # 281 80 21 / after filtering invalid data 73 80 24 (original experiment size 211 297 14)
FILE_PATH_101_1_to = "../../Data/Ngsim数据集/101数据集/3. 提取数据/2. 换道数据/trajectories-0750am-0805am_lanechange.txt" # 293 80 21 / after filtering invalid data 148 80 24 (original experiment size 215 297 14)
FILE_PATH_101_2_to = "../../Data/Ngsim数据集/101数据集/3. 提取数据/2. 换道数据/trajectories-0805am-0820am_lanechange.txt" # 300 80 21 / after filtering invalid data 163 80 24 (original experiment size 242 297 14)
FILE_PATH_101_3_to = "../../Data/Ngsim数据集/101数据集/3. 提取数据/2. 换道数据/trajectories-0820am-0835am_lanechange.txt" # 326 80 21 / after filtering invalid data 136 80 24 (original experiment size 227 297 14)

def getData():
  """Load the lane-change samples from all six NGSIM files and stack them
  along the sample axis into a single array."""
  source_files = (
    FILE_PATH_I80_1_to,
    FILE_PATH_I80_2_to,
    FILE_PATH_I80_3_to,
    FILE_PATH_101_1_to,
    FILE_PATH_101_2_to,
    FILE_PATH_101_3_to,
  )
  datasets = [read_extract_lanechange_data(path) for path in source_files]
  return np.concatenate(datasets, axis=0)

def transformer_encoder(inputs, d_model, num_heads, dff, dropout=0.1):
    """One Transformer encoder layer.

    Self-attention followed by a position-wise feed-forward network,
    each sub-layer wrapped in dropout, a residual connection and
    layer normalization.
    """
    # --- Self-attention sub-layer ---
    attention = layers.MultiHeadAttention(
        num_heads=num_heads,
        key_dim=d_model // num_heads,
    )
    attended = attention(inputs, inputs)
    attended = layers.Dropout(dropout)(attended)
    # Residual connection + layer norm.
    attn_normed = layers.LayerNormalization(epsilon=1e-6)(inputs + attended)

    # --- Position-wise feed-forward sub-layer ---
    feed_forward = keras.Sequential([
        layers.Dense(dff, activation="relu"),
        layers.Dense(d_model),
    ])
    projected = layers.Dropout(dropout)(feed_forward(attn_normed))
    # Residual connection + layer norm.
    return layers.LayerNormalization(epsilon=1e-6)(attn_normed + projected)

def build_transformer_model(input_shape, output_shape):
    """Assemble and compile the Transformer regression model.

    input_shape:  (n_past, n_features) of each input window.
    output_shape: (n_future, n_targets) of each prediction window.
    Returns a compiled keras.Model (MSE loss, Adam optimizer).
    """
    sequence_in = layers.Input(shape=input_shape)

    # Embed raw features into 64 dimensions, then add positional encodings.
    embedded = layers.Dense(64)(sequence_in)
    embedded = PositionalEncoding(64, max_len=input_shape[0])(embedded)

    # Stack of two identical encoder layers.
    encoded = embedded
    for _ in range(2):
        encoded = transformer_encoder(encoded, d_model=64, num_heads=4, dff=128, dropout=0.1)

    # Pool over the time axis, project to the flattened prediction window,
    # then reshape back to (n_future, n_targets).
    pooled = layers.GlobalAveragePooling1D()(encoded)
    flat_prediction = layers.Dense(output_shape[0] * output_shape[1])(pooled)
    sequence_out = layers.Reshape(output_shape)(flat_prediction)

    model = keras.Model(inputs=sequence_in, outputs=sequence_out)
    model.compile(loss="mse", optimizer="adam")
    return model

if __name__ == '__main__':
  # Load and merge the lane-change samples from all six data files.
  lanechange_data = getData()
  print("数据集大小: ", len(lanechange_data), len(lanechange_data[0]), len(lanechange_data[0][0])) # 682 80 24

  # Min-max normalize the selected feature columns (the boolean list marks
  # which of the listed columns are actually normalized).
  follow_data, min_max_list = normalization(lanechange_data, [10, 11, 12, 13, 14, 15, 18, 19, 21], [True, True, True, False, False, False, True, True, True]) # TODO: does index 17 also need to be included here?
  print("速度的min和max: ", min_max_list[0][0], min_max_list[0][1], min_max_list[1][0], min_max_list[1][1], min_max_list[2][0], min_max_list[2][1])
  print("速加度的min和max: ", min_max_list[3][0], min_max_list[3][1], min_max_list[4][0], min_max_list[4][1], min_max_list[5][0], min_max_list[5][1])
  # BUG FIX: this print originally emitted min_max_list[6][0], [7][1], [8][1]
  # (one min and two maxes); now it prints the full (min, max) pair for each
  # of the three gap features, consistent with the two prints above.
  print("相对间距的min和max: ", min_max_list[6][0], min_max_list[6][1], min_max_list[7][0], min_max_list[7][1], min_max_list[8][0], min_max_list[8][1])

  # Build sliding-window samples: 20 past steps predict 5 future steps,
  # then split 70/30 into training and validation sets.
  n_past, n_future = 20, 5
  X, Y = create_lstm_dataset(follow_data, n_past, n_future, [10, 11, 12, 13, 14, 15, 18, 19, 21], [15])
  x_train, x_val, y_train, y_val = train_test_split(X, Y, test_size=0.3, random_state=42)

  # Build and fit the Transformer model (9 input features, 1 target).
  model = build_transformer_model((n_past, 9), (n_future, 1))
  model.fit(x_train, y_train,
              epochs=EPOCHS,
              batch_size=BATCH_SIZE,
              validation_data=(x_val, y_val))
  # model.save("./model/lanechange_EPOCHS_50_BATCH_SIZE_64_transformer-normalization.keras")