import os
import torch
from torch import nn, optim
from torch.utils.tensorboard import SummaryWriter
import numpy as np
from model.model import Transfollower, lstm_model, nn_model, conformer_xattn_model, nhits_model, informer_model, nbeats_model

from data.dataset import get_data
from model.model import Transfollower, lstm_model, nn_model, conformer_xattn_model, nhits_model
from config import Settings, HighDSettings

# Pin GPU enumeration to PCI bus order and expose only the first device.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # specify which GPU(s) to be used

DATASET = 'highD'  # ['SH', 'NGSIM', 'highD']

# highD carries its own hyper-parameter set; all other datasets share the defaults.
settings = HighDSettings() if DATASET == 'highD' else Settings()

# Two additional model choices were added here.
MODEL = 'transfollower'  # ['transfollower','lstm','nn','conformer','nhits']

exp_name = f'{DATASET}_{MODEL}'
save = f'checkpoints/{exp_name}_model.pt'
writer = SummaryWriter(f'runs/{exp_name}')

# parameters
SEQ_LEN = settings.SEQ_LEN
if MODEL == 'nn':
    # Original logic preserved: the plain NN consumes the whole history window.
    settings.LABEL_LEN = SEQ_LEN

LABEL_LEN = settings.LABEL_LEN
PRED_LEN = settings.PRED_LEN
BATCH_SIZE = settings.BATCH_SIZE
lr = settings.lr
T = settings.T  # data sampling interval
N_EPOCHES = settings.N_EPOCHES

criterion = nn.MSELoss()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

if __name__ == '__main__':
    # 1) Load the data first.
    train_loader, val_loader, _ = get_data(data_name='data', config=settings)

    # 2) Infer encoder/decoder feature dims from a single batch.
    tmp_batch = next(iter(train_loader))
    enc_inp_tmp = tmp_batch['his'].float()      # (B, T_enc, D_enc)
    batch_y_tmp = tmp_batch['svSpd'].float()    # (B, T_dec, 1)
    batch_y_mark_tmp = tmp_batch['pre'].float() # (B, T_dec, 1)

    # Decoder input = known label window + horizon padded with the label mean,
    # then the lead-vehicle series appended as a second channel.
    dec_pad = torch.zeros([batch_y_tmp.shape[0], PRED_LEN, batch_y_tmp.shape[-1]]).float() + \
              batch_y_tmp[:, :LABEL_LEN, :].mean(axis=1, keepdim=True)
    dec_inp_tmp = torch.cat([batch_y_tmp[:, :LABEL_LEN, :], dec_pad], dim=1)
    dec_inp_tmp = torch.cat([dec_inp_tmp, batch_y_mark_tmp], axis=-1)  # (B, T_dec, 2)

    D_enc = enc_inp_tmp.shape[-1]  # commonly 9
    D_dec = dec_inp_tmp.shape[-1]  # commonly 2

    # 3) Instantiate the selected model with the inferred dims.
    # BUG FIX: the first branch previously tested MODEL == 'informer' while
    # building a Transfollower, which (a) shadowed the real informer branch
    # below and (b) sent the default MODEL='transfollower' into the
    # ValueError fallback. The first branch must match 'transfollower'.
    if MODEL == 'transfollower':
        model = Transfollower(config=settings).to(device)
    elif MODEL == 'informer':
        model = informer_model(config=settings, enc_in=D_enc, dec_in=D_dec, d_model=192, nhead=6, e_layers=3,
                               d_layers=2, d_ff=512, dropout=0.1).to(device)
    elif MODEL == 'nbeats':
        model = nbeats_model(config=settings, input_size=D_dec, width=512, n_blocks=4, n_layers=4).to(device)
    elif MODEL == 'lstm':
        model = lstm_model(config=settings, input_size=D_enc).to(device)
    elif MODEL == 'nn':
        model = nn_model(config=settings, input_size=D_dec).to(device)
    elif MODEL == 'conformer':
        model = conformer_xattn_model(config=settings, input_size=D_enc, d_model=192, nhead=6, num_layers=4, dropout=0.1).to(device)
    elif MODEL == 'nhits':
        model = nhits_model(config=settings, input_size=D_dec, hidden=256, stacks_per_scale=2, use_conv=True).to(device)
    else:
        raise ValueError(f"Unknown MODEL={MODEL}")

    model_optim = optim.Adam(model.parameters(), lr=lr)

    def build_dec_inp(batch_y, batch_y_mark):
        """Assemble the decoder input tensor.

        Keeps the known LABEL_LEN prefix of the target speed, pads the
        PRED_LEN horizon with the prefix mean, then appends the lead-vehicle
        series as a second feature channel -> (B, LABEL_LEN+PRED_LEN, 2).
        """
        prefix = batch_y[:, :LABEL_LEN, :]
        pad = prefix.mean(dim=1, keepdim=True).repeat(1, PRED_LEN, 1).float()
        dec = torch.cat([prefix, pad], dim=1).float()
        return torch.cat([dec, batch_y_mark], dim=-1)

    def val(data_loader):
        """Evaluate `model` on `data_loader` and return the mean MSE.

        Temporarily switches the model to eval mode and restores train mode
        before returning.
        """
        model.eval()
        losses = []
        with torch.no_grad():
            for item in data_loader:
                enc_inp = item['his'].float().to(device)        # (B, T_enc, D_enc)
                batch_y = item['svSpd'].float()                 # (B, T_dec, 1)
                y_label = batch_y[:, -PRED_LEN:, :].to(device)  # (B, PRED_LEN, 1)
                dec_inp = build_dec_inp(batch_y, item['pre'].float()).to(device)

                # Forward pass; the call signature depends on the architecture.
                if MODEL in ['nn', 'nhits']:
                    out = model(dec_inp)
                elif MODEL == 'transfollower':
                    out = model(enc_inp, dec_inp)[0]
                else:  # 'lstm' or 'conformer'
                    out = model(enc_inp, dec_inp)

                # Longitudinal-speed channel only, kept 3-D: (B, PRED_LEN, 1).
                pred_lon_speed = out[:, :, :1]
                losses.append(criterion(pred_lon_speed, y_label).item())
        model.train()
        return np.mean(losses)

    # ---- training loop ----
    best_val_loss = None
    model.train()
    for epoch in range(N_EPOCHES):
        epoch_losses = []
        for item in train_loader:
            enc_inp = item['his'].float().to(device)
            batch_y = item['svSpd'].float()
            y_label = batch_y[:, -PRED_LEN:, :].to(device)
            dec_inp = build_dec_inp(batch_y, item['pre'].float()).to(device)

            # Forward pass; the call signature depends on the architecture.
            if MODEL in ['nn', 'nhits']:
                out = model(dec_inp)
            elif MODEL == 'transfollower':
                out = model(enc_inp, dec_inp)[0]
            else:  # 'lstm' or 'conformer'
                out = model(enc_inp, dec_inp)

            # Loss on the longitudinal-speed channel only.
            loss = criterion(out[:, :, 0].unsqueeze(-1), y_label)

            model_optim.zero_grad()
            loss.backward()
            # Clip gradients to stabilise training.
            torch.nn.utils.clip_grad_norm_(model.parameters(), 0.25)
            model_optim.step()
            epoch_losses.append(loss.item())

        train_loss = np.mean(epoch_losses)
        val_loss = val(val_loader)

        # Checkpoint whenever validation improves (pickles the whole module).
        if best_val_loss is None or val_loss < best_val_loss:
            with open(save, 'wb') as f:
                torch.save(model, f)
            best_val_loss = val_loss

        print(f"Epoch: {epoch + 1}| Train Loss: {train_loss:.7f} Vali Loss: {val_loss:.7f} Best val loss: {best_val_loss:.7f}")
        writer.add_scalar('Loss/train', train_loss, epoch)
        writer.add_scalar('Loss/vali', val_loss, epoch)
    writer.close()
