import torch
from torch import nn, optim
from tensorboardX import SummaryWriter
from data.dataset import get_data
from model.model import *
from config import *
import os
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)

# Pin GPU enumeration to PCI bus order and restrict training to GPU 0.
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0"
DATASET = 'highD'

# Dataset-specific hyperparameters (SEQ_LEN, LABEL_LEN, PRED_LEN, lr, ...).
settings = HighDSettings() if DATASET == 'highD' else Settings()
exp_name = f'{DATASET}_transfollower'
# Checkpoint path for the best (lowest val-loss) model.
save = f'checkpoints/{exp_name}_model.pt'
# TensorBoard log writer for this experiment.
writer = SummaryWriter(f'runs/{exp_name}')

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Transfollower model and its Adam optimizer; lr comes from the settings object.
model = Transfollower(config=settings).to(device)
model_optim = optim.Adam(model.parameters(), lr=settings.lr)

mse_loss = nn.MSELoss()
# TODO: dynamic learning-rate scheduler (not yet wired up)


def construct_dec_inp(batch_x, pre_all, label_len, pred_len):
    """Assemble the decoder input sequence.

    The known prefix (first ``label_len`` steps of ``batch_x``) is kept
    verbatim, the ``pred_len`` future steps are seeded with the per-sample
    mean of that prefix, and the auxiliary features ``pre_all`` are appended
    along the last (feature) dimension.

    Returns a tensor of shape (B, label_len + pred_len, C + pre_all_features).
    """
    prefix = batch_x[:, :label_len, :]
    # Per-sample mean over the prefix, broadcast across the future horizon.
    seed = prefix.mean(dim=1, keepdim=True)
    future = seed.expand(batch_x.shape[0], pred_len, batch_x.shape[-1])
    sequence = torch.cat([prefix, future], dim=1)
    return torch.cat([sequence, pre_all], dim=-1)


# Validation pass.
def val(data_loader):
    """Evaluate the model on ``data_loader`` and return the mean MSE loss.

    Puts the model into eval mode; the caller is expected to switch it back
    to train mode before the next training epoch (the main loop does so).
    Raises ZeroDivisionError if the loader is empty, like the original.
    """
    model.eval()
    total_loss = []
    with torch.no_grad():
        for item in data_loader:
            batch_x = item['latSpd'].to(device)
            # Ground truth: the last PRED_LEN steps of the lateral-speed series.
            x_label = batch_x[:, -settings.PRED_LEN:, :].float()
            roadbev_his = item['roadbev_his'].to(device)
            roadbev_pre = item['roadbev_pre'].to(device)
            # Decoder input: known prefix + mean-seeded future + auxiliary features.
            data_pre = construct_dec_inp(batch_x, item['pre'].to(device),
                                         settings.LABEL_LEN, settings.PRED_LEN)
            # Single float cast each (the original cast these twice).
            enc_inp = item['his'].to(device).float()
            dec_inp = data_pre.float()
            out = model(enc_inp, dec_inp, roadbev_his, roadbev_pre)
            # Channel 0 of the first model output is the predicted lateral speed.
            pred_lat_speed = out[0][:, :, 0].unsqueeze(-1)
            loss = mse_loss(pred_lat_speed, x_label)
            total_loss.append(loss.item())
    return sum(total_loss) / len(total_loss)

# Main training loop.
if __name__ == '__main__':
    train_loader, val_loader, _ = get_data(settings, data_name='data')
    # Ensure the checkpoint directory exists before the first save
    # (the original open() would fail if checkpoints/ was missing).
    os.makedirs(os.path.dirname(save), exist_ok=True)
    best_val_loss = float('inf')
    for epoch in range(settings.N_EPOCHES):
        model.train()
        train_loss = 0.0
        for item in train_loader:
            batch_x = item['latSpd'].to(device)
            # Ground truth: the last PRED_LEN steps of the lateral-speed series.
            x_label = batch_x[:, -settings.PRED_LEN:, :].float()
            roadbev_his = item['roadbev_his'].to(device)
            roadbev_pre = item['roadbev_pre'].to(device)
            # Decoder input: known prefix + mean-seeded future + auxiliary features.
            data_pre = construct_dec_inp(batch_x, item['pre'].to(device),
                                         settings.LABEL_LEN, settings.PRED_LEN)
            # Single float cast each (the original cast these twice).
            enc_inp = item['his'].to(device).float()
            dec_inp = data_pre.float()
            out = model(enc_inp, dec_inp, roadbev_his, roadbev_pre)
            # Channel 0 of the first model output is the predicted lateral speed.
            pred_lat_speed = out[0][:, :, 0].unsqueeze(-1)
            loss = mse_loss(pred_lat_speed, x_label)
            model_optim.zero_grad()
            loss.backward()
            # Clip gradients to stabilize transformer training.
            torch.nn.utils.clip_grad_norm_(model.parameters(), 0.25)
            model_optim.step()
            train_loss += loss.item()

        # Average training loss over the epoch.
        train_loss /= len(train_loader)
        val_loss = val(val_loader)

        # Save the best model so far (whole module, matching the original format).
        # NOTE: best_val_loss is initialized to inf, so the original
        # "is None" guard was dead code and has been removed.
        if val_loss < best_val_loss:
            with open(save, 'wb') as f:
                torch.save(model, f)
            best_val_loss = val_loss

        print(
            f"Epoch: {epoch + 1} | Train Loss: {train_loss:.7f} | Val Loss: {val_loss:.7f} | Best Val Loss: {best_val_loss:.7f}")

        # TensorBoard logging.
        writer.add_scalar('Loss/train', train_loss, epoch)
        writer.add_scalar('Loss/vali', val_loss, epoch)

    writer.close()
##################################加上动态学习器
# import torch
# from torch import nn
# from torch import optim
# from torch.utils.tensorboard import SummaryWriter
# import numpy as np
# from torch.optim.lr_scheduler import ReduceLROnPlateau
# from data.dataset import get_data
# from model.model import Transfollower, lstm_model, nn_model
# from config import Settings, HighDSettings
# import os
#
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # specify which GPU(s) to be used
#
# DATASET = 'highD'  # ['SH', 'NGSIM', 'highD']
#
# if DATASET == 'highD':
#     settings = HighDSettings()
# else:
#     settings = Settings()
#
# MODEL = 'transfollower'  # ['transfollower','lstm', 'nn']
#
# exp_name = f'{DATASET}_{MODEL}'
# save = f'checkpoints/{exp_name}_model.pt'
# writer = SummaryWriter(f'runs/{exp_name}')
#
# # parameters
# SEQ_LEN = settings.SEQ_LEN
#
# if MODEL == 'nn':
#     settings.LABEL_LEN = SEQ_LEN
#
# LABEL_LEN = settings.LABEL_LEN
# PRED_LEN = settings.PRED_LEN
# BATCH_SIZE = settings.BATCH_SIZE
# lr = settings.lr
# T = settings.T  # data sampling interval
# N_EPOCHES = settings.N_EPOCHES
#
# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# if MODEL == 'transfollower':
#     model = Transfollower(config=settings).to(device)
# elif MODEL == 'lstm':
#     model = lstm_model(config=settings).to(device)
# elif MODEL == 'nn':
#     model = nn_model(config=settings).to(device)
#
# model_optim = optim.Adam(model.parameters(), lr=lr)
# criterion = nn.MSELoss()
#
# # 添加动态学习率调度器
# scheduler = ReduceLROnPlateau(model_optim, 'min', patience=10, factor=0.5)
#
# if __name__ == '__main__':
#     train_loader, val_loader, _ = get_data(data_name='data', config=settings)
#
#
#     def val(data_loader):
#         model.eval()
#         total_loss = []
#         with torch.no_grad():
#             for i, item in enumerate(data_loader):
#                 enc_inp = item['his'].float().to(device)  # (256,50,9)
#
#                 batch_y = item['svSpd'].float()  # (256,149,1)
#                 y_label = batch_y[:, -PRED_LEN:, :].to(device)  # (256,137,1)
#                 batch_y_mark = item['pre'].float().to(device)  # (256,149,1)
#
#                 dec_inp = torch.zeros([batch_y.shape[0], PRED_LEN, batch_y.shape[-1]]).float() + \
#                           batch_y[:, :LABEL_LEN, :].mean(axis=1,
#                                                          keepdim=True)  # 前一个的形状是[256,137,1]填充的后一个的形状是[256,1,1],不同批次不同   自我车辆
#                 dec_inp = torch.cat([batch_y[:, :LABEL_LEN, :], dec_inp], dim=1).float().to(device)  # 拼接[256,149,1]
#                 dec_inp = torch.cat([dec_inp, batch_y_mark], axis=-1)  # adding lv speed 拼接成[256,149,2]
#
#                 # encoder - decoder
#                 if MODEL == 'nn':
#                     out = model(dec_inp)
#                 elif MODEL == 'transfollower':
#                     out = model(enc_inp, dec_inp)[0]  # 输出的第1个对应  model.py里的out[256,137,1]
#                 else:
#                     out = model(enc_inp, dec_inp)
#
#                 pred_lon_speed = out[:, :, 0].unsqueeze(-1)  # 纵向速度预测 (256, 137, 1)
#
#                 # 纵向速度损失
#                 lon_speed_loss = criterion(pred_lon_speed, y_label)
#
#                 # 总损失
#                 loss = lon_speed_loss
#
#                 total_loss.append(loss.item())
#             model.train()
#             return np.mean(total_loss)
#
#
#     # train
#     best_val_loss = None
#     model.train()
#     for epoch in range(N_EPOCHES):
#         train_losses = []
#         for i, item in enumerate(train_loader):
#             enc_inp = item['his'].float().to(device)  # (256,50,9)
#
#             batch_y = item['svSpd'].float()  # (256,149,1)
#             y_label = batch_y[:, -PRED_LEN:, :].to(device)  # (256,137,1)
#             batch_y_mark = item['pre'].float().to(device)  # (256,149,1)
#
#             dec_inp = torch.zeros([batch_y.shape[0], PRED_LEN, batch_y.shape[-1]]).float() + \
#                       batch_y[:, :LABEL_LEN, :].mean(axis=1,keepdim=True)  # 前一个的形状是[256,137,1]填充的后一个的形状是[256,1,1],不同批次不同   自我车辆
#             dec_inp = torch.cat([batch_y[:, :LABEL_LEN, :], dec_inp], dim=1).float().to(device)  # 拼接[256,149,1]
#             dec_inp = torch.cat([dec_inp, batch_y_mark], axis=-1)  # adding lv speed 拼接成[256,149,2]
#
#             # encoder - decoder
#             if MODEL == 'nn':
#                 out = model(dec_inp)
#             elif MODEL == 'transfollower':
#                 out = model(enc_inp, dec_inp)[0]  # 输出的第1个对应  model.py里的out[256,137,1]
#             else:
#                 out = model(enc_inp, dec_inp)
#
#             pred_lon_speed = out[:, :, 0].unsqueeze(-1)  # 纵向速度预测 (256, 137, 1)
#
#             # 纵向速度损失
#             lon_speed_loss = criterion(pred_lon_speed, y_label)
#
#             # 总损失
#             loss = lon_speed_loss
#
#             model_optim.zero_grad()
#             loss.backward()
#             torch.nn.utils.clip_grad_norm_(model.parameters(), 0.25)
#             model_optim.step()
#
#             train_losses.append(loss.item())
#
#         train_loss = np.mean(train_losses)
#         val_loss = val(val_loader)
#
#         # 更新学习率
#         scheduler.step(val_loss)
#         for param_group in model_optim.param_groups:
#             print(f"Updated Learning Rate: {param_group['lr']:.6f}")
#         # 保存最佳模型
#         if best_val_loss is None or best_val_loss > val_loss:
#             with open(save, 'wb') as f:
#                 torch.save(model, f)
#             best_val_loss = val_loss
#
#         print("Epoch: {0}| Train Loss: {1:.7f} Vali Loss: {2:.7f} Best val loss: {3:.7f}".format(
#             epoch + 1, train_loss, val_loss, best_val_loss))
#
#         writer.add_scalar('Loss/train', train_loss, epoch)
#         writer.add_scalar('Loss/vali', val_loss, epoch)
#
#     writer.close()
#

###############################自定义损失函数
# import torch
# from torch import nn
# from torch import optim
# from torch.utils.tensorboard import SummaryWriter
# import numpy as np
# import os
#
# # 这里导入自定义的损失函数
#
#
# from data.dataset import get_data
# from model.model import Transfollower, lstm_model, nn_model
# from config import Settings, HighDSettings
#
#
# class CustomLoss(nn.Module):
#     def __init__(self, lambda_smooth=0.1, lambda_outlier=1.5, outlier_threshold=0.1):
#         super(CustomLoss, self).__init__()
#         self.lambda_smooth = lambda_smooth  # 平滑项的权重
#         self.lambda_outlier = lambda_outlier  # 异常值加权的权重
#         self.outlier_threshold = outlier_threshold  # 异常值的阈值
#
#     def forward(self, y_true, y_pred):
#         # 基本MSE损失
#         mse_loss = nn.MSELoss()(y_pred, y_true)
#
#         # 计算异常值加权损失
#         # 异常值是那些横向速度超过设定阈值的值
#         outlier_mask = torch.abs(y_true) > self.outlier_threshold
#         outlier_loss = torch.sum(outlier_mask * (y_pred - y_true) ** 2)
#
#         # 平滑损失：惩罚横向速度的快速变化
#         # 假设我们希望模型对平滑的变化更敏感
#         smooth_loss = torch.sum(torch.abs(y_pred[1:] - y_pred[:-1]))  # 连续点之间的变化
#
#         # 总损失
#         total_loss = mse_loss + self.lambda_outlier * outlier_loss + self.lambda_smooth * smooth_loss
#         return total_loss
#
#
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # specify which GPU(s) to be used
#
# DATASET = 'highD'  # ['SH', 'NGSIM', 'highD']
#
# if DATASET == 'highD':
#     settings = HighDSettings()
# else:
#     settings = Settings()
#
# MODEL = 'transfollower'  # ['transfollower','lstm', 'nn']
#
# exp_name = f'{DATASET}_{MODEL}'
# save = f'checkpoints/{exp_name}_model.pt'
# writer = SummaryWriter(f'runs/{exp_name}')
#
# # parameters
# SEQ_LEN = settings.SEQ_LEN
#
# if MODEL == 'nn':
#     settings.LABEL_LEN = SEQ_LEN
#
# LABEL_LEN = settings.LABEL_LEN
# PRED_LEN = settings.PRED_LEN
# BATCH_SIZE = settings.BATCH_SIZE
# lr = settings.lr
# T = settings.T  # data sampling interval
# N_EPOCHES = settings.N_EPOCHES
#
# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# if MODEL == 'transfollower':
#     model = Transfollower(config=settings).to(device)
# elif MODEL == 'lstm':
#     model = lstm_model(config=settings).to(device)
# elif MODEL == 'nn':
#     model = nn_model(config=settings).to(device)
#
# model_optim = optim.Adam(model.parameters(), lr=lr)
#
# # 使用自定义损失函数
# criterion = CustomLoss(lambda_smooth=0.1, lambda_outlier=1.5, outlier_threshold=0.1)
#
# if __name__ == '__main__':
#     train_loader, val_loader, _ = get_data(data_name='data', config=settings)
#
#
#     def val(data_loader):
#         model.eval()
#         total_loss = []
#         with torch.no_grad():
#             for i, item in enumerate(data_loader):
#                 enc_inp = item['his'].float().to(device)  # (256,50,9)
#
#                 batch_y = item['svSpd'].float()  # (256,149,1)
#                 y_label = batch_y[:, -PRED_LEN:, :].to(device)  # (256,137,1)
#                 batch_y_mark = item['pre'].float().to(device)  # (256,149,1)
#
#                 dec_inp = torch.zeros([batch_y.shape[0], PRED_LEN, batch_y.shape[-1]]).float() + \
#                           batch_y[:, :LABEL_LEN, :].mean(axis=1,
#                                                          keepdim=True)  # 前一个的形状是[256,137,1]填充的后一个的形状是[256,1,1],不同批次不同 自我车辆
#                 dec_inp = torch.cat([batch_y[:, :LABEL_LEN, :], dec_inp], dim=1).float().to(device)  # 拼接[256,149,1]
#                 dec_inp = torch.cat([dec_inp, batch_y_mark], axis=-1)  # adding lv speed 拼接成[256,149,2]
#
#                 # encoder - decoder
#                 if MODEL == 'nn':
#                     out = model(dec_inp)
#                 elif MODEL == 'transfollower':
#                     out = model(enc_inp, dec_inp)[0]  # 输出的第1个对应  model.py里的out[256,137,1]
#                 else:
#                     out = model(enc_inp, dec_inp)
#
#                 pred_lon_speed = out[:, :, 0].unsqueeze(-1)  # 纵向速度预测 (256, 137, 1)
#
#                 # 纵向速度损失
#                 lon_speed_loss = criterion(pred_lon_speed, y_label)
#
#                 # 总损失
#                 loss = lon_speed_loss
#
#                 total_loss.append(loss.item())
#             model.train()
#             return np.mean(total_loss)
#
#
#     # train
#     best_val_loss = None
#     model.train()
#     for epoch in range(N_EPOCHES):
#         train_losses = []
#         for i, item in enumerate(train_loader):
#             enc_inp = item['his'].float().to(device)  # (256,50,9)
#
#             batch_y = item['svSpd'].float()  # (256,149,1)
#             y_label = batch_y[:, -PRED_LEN:, :].to(device)  # (256,137,1)
#             batch_y_mark = item['pre'].float().to(device)  # (256,149,1)
#
#             dec_inp = torch.zeros([batch_y.shape[0], PRED_LEN, batch_y.shape[-1]]).float() + \
#                       batch_y[:, :LABEL_LEN, :].mean(axis=1,
#                                                      keepdim=True)  # 前一个的形状是[256,137,1]填充的后一个的形状是[256,1,1],不同批次不同 自我车辆
#             dec_inp = torch.cat([batch_y[:, :LABEL_LEN, :], dec_inp], dim=1).float().to(device)  # 拼接[256,149,1]
#             dec_inp = torch.cat([dec_inp, batch_y_mark], axis=-1)  # adding lv speed 拼接成[256,149,2]
#
#             # encoder - decoder
#             if MODEL == 'nn':
#                 out = model(dec_inp)
#             elif MODEL == 'transfollower':
#                 out = model(enc_inp, dec_inp)[0]  # 输出的第1个对应  model.py里的out[256,137,1]
#             else:
#                 out = model(enc_inp, dec_inp)
#
#             pred_lon_speed = out[:, :, 0].unsqueeze(-1)  # 纵向速度预测 (256, 137, 1)
#
#             # 纵向速度损失
#             lon_speed_loss = criterion(pred_lon_speed, y_label)
#
#             # 总损失
#             loss = lon_speed_loss
#
#             model_optim.zero_grad()
#             loss.backward()
#             torch.nn.utils.clip_grad_norm_(model.parameters(), 0.25)
#             model_optim.step()
#
#             train_losses.append(loss.item())
#
#         train_loss = np.mean(train_losses)
#         val_loss = val(val_loader)
#
#         # 保存最佳模型
#         if best_val_loss is None or best_val_loss > val_loss:
#             with open(save, 'wb') as f:
#                 torch.save(model, f)
#             best_val_loss = val_loss
#
#         print(
#             f"Epoch: {epoch + 1}| Train Loss: {train_loss:.7f} Vali Loss: {val_loss:.7f} Best val loss: {best_val_loss:.7f}")
#
#         writer.add_scalar('Loss/train', train_loss, epoch)
#         writer.add_scalar('Loss/vali', val_loss, epoch)
#     writer.close()
