"""
  此文件用来训练 头车自由驾驶的transformer模型
"""

# NOTE: path hack — make the project's internal packages (utils.*) importable
# when this script is run directly, by appending the Code folder to sys.path.
import sys
import os
CODE_INTERNAL_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) # absolute path of the Code folder (parent of this file's directory)
sys.path.append(CODE_INTERNAL_PATH)

# 导入外部包
import numpy as np
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras import layers

# 导入内部包
from utils.read_data import read_extract_free_drive_data
from utils.lstm import create_lstm_dataset, normalization
from utils.transformer import PositionalEncoding

EPOCHS = 50      # number of training epochs
BATCH_SIZE = 64  # mini-batch size for model.fit

# Extracted free-driving trajectory files from the NGSIM I-80 dataset.
FILE_PATH_I80_1_to = "../../Data/Ngsim数据集/I80数据集/3. 提取数据/3. 自由驾驶数据/trajectories-0400-0415_free_drive.txt"
FILE_PATH_I80_2_to = "../../Data/Ngsim数据集/I80数据集/3. 提取数据/3. 自由驾驶数据/trajectories-0500-0515_free_drive.txt"  # NOTE(review): currently unused — only the first file is loaded in getData()

def transformer_encoder(inputs, d_model, num_heads, dff, dropout=0.1):
    """One Transformer encoder layer: multi-head self-attention + position-wise FFN.

    Both sub-layers use dropout on their output, a residual connection, and
    post-LayerNormalization (epsilon=1e-6).

    Args:
        inputs: tensor of shape (batch, seq_len, d_model).
        d_model: model (feature) dimension; key_dim per head is d_model // num_heads.
        num_heads: number of attention heads.
        dff: hidden width of the feed-forward sub-layer.
        dropout: dropout rate applied after each sub-layer.

    Returns:
        Tensor with the same shape as `inputs`.
    """
    # --- Self-attention sub-layer ---
    attention = layers.MultiHeadAttention(num_heads=num_heads,
                                          key_dim=d_model // num_heads)
    attended = attention(inputs, inputs)
    attended = layers.Dropout(dropout)(attended)
    norm_attn = layers.LayerNormalization(epsilon=1e-6)(inputs + attended)

    # --- Position-wise feed-forward sub-layer ---
    feed_forward = keras.Sequential([
        layers.Dense(dff, activation="relu"),
        layers.Dense(d_model),
    ])
    projected = layers.Dropout(dropout)(feed_forward(norm_attn))
    return layers.LayerNormalization(epsilon=1e-6)(norm_attn + projected)

def build_transformer_model(input_shape, output_shape):
    """Assemble and compile the trajectory-prediction Transformer.

    Args:
        input_shape: (timesteps, features) of the input window, e.g. (30, 2).
        output_shape: (timesteps, features) of the predicted window, e.g. (10, 1).

    Returns:
        A compiled keras.Model (MSE loss, Adam optimizer).
    """
    d_model = 64  # embedding width used throughout the encoder stack
    inputs = layers.Input(shape=input_shape)

    # Project raw features to the model dimension, then add positional encoding.
    embedded = layers.Dense(d_model)(inputs)
    embedded = PositionalEncoding(d_model, max_len=input_shape[0])(embedded)

    # Two stacked encoder layers.
    encoded = embedded
    for _ in range(2):
        encoded = transformer_encoder(encoded, d_model=d_model, num_heads=4,
                                      dff=128, dropout=0.1)

    # Pool over the time axis, regress the flattened future window, reshape back.
    pooled = layers.GlobalAveragePooling1D()(encoded)
    flat = layers.Dense(output_shape[0] * output_shape[1])(pooled)
    outputs = layers.Reshape(output_shape)(flat)

    model = keras.Model(inputs=inputs, outputs=outputs)
    model.compile(loss="mse", optimizer="adam")
    return model

def getData():
    """Load the I-80 04:00-04:15 free-driving records as a NumPy array."""
    records = read_extract_free_drive_data(FILE_PATH_I80_1_to)
    return np.array(records)

if __name__ == "__main__":
    # 数据预处理（保持与原始代码一致）
    free_drive_datas = getData()
    free_drive_datas, min_max_list = normalization(free_drive_datas, [4, 5], [True, False])

    # 创建数据集
    n_past, n_future = 30, 10
    X, Y = create_lstm_dataset(free_drive_datas, n_past, n_future, [4, 5], [5])
    x_train, x_val, y_train, y_val = train_test_split(X, Y, test_size=0.3, random_state=42)

    # 构建并训练Transformer模型
    model = build_transformer_model((n_past, 2), (n_future, 1))
    model.fit(x_train, y_train, 
              epochs=EPOCHS,
              batch_size=BATCH_SIZE,
              validation_data=(x_val, y_val))
    
    # model.save("./model/free_drive_EPOCHS_50_BATCH_SIZE_64_transformer-normalization.keras")