import datetime

from torch.utils.data import DataLoader
from dataset import TrafficDataset
from model import TransAm, LSTMModel, lstm_init_weights, Transformer
import argparse
import torch
import torch.nn as nn
from tqdm import tqdm
import math
from utils import PlotUtils
from constant import Constant
from torch.utils.tensorboard import SummaryWriter
import os


def test(args, model, data_loader, criterion):
    """Run one full validation pass and report the average loss / perplexity.

    Returns a tuple ``(truth, predictions, loss, ppl)`` where truth and
    predictions are flat CPU tensors accumulated over the whole loader.
    """
    model.eval()
    running_loss = 0.
    predictions = torch.Tensor(0)
    truth = torch.Tensor(0)
    with torch.no_grad():
        for src, target, tgt_in in data_loader:
            src = src.to(args.device).float()
            target = target.to(args.device).float()
            # decoder-based models (Transformer) take the shifted target as a second input
            if args.have_decoder:
                output = model(src, tgt_in.to(args.device).float())
            else:
                output = model(src)
            running_loss += criterion(output, target).item()
            # NOTE(review): this picks the single step at index -OUTPUT_WINDOW,
            # not the last OUTPUT_WINDOW steps — confirm a slice was not intended.
            predictions = torch.cat((predictions, output[:, -Constant.OUTPUT_WINDOW].view(-1).cpu()), 0)
            truth = torch.cat((truth, target[:, -Constant.OUTPUT_WINDOW].view(-1).cpu()), 0)

    loss = running_loss / len(data_loader)
    tqdm.write(f'valid loss : {loss:.5f}, ppl:{math.exp(loss):.5f}')
    tqdm.write("-" * 100)
    return truth, predictions, loss, math.exp(loss)


def train(args, model, data_loader, criterion, optimizer, epoch, writer):
    """Train the model for one epoch.

    Logs per-step loss and perplexity to ``writer`` (global step is
    ``epoch * len(data_loader) + idx``) and prints a progress line every
    10 batches.
    """
    model.train()
    batch_len = len(data_loader)

    process_bar = tqdm(total=batch_len)
    # close the bar even if a batch raises, otherwise tqdm leaks its handle
    # and corrupts console output on the next epoch
    try:
        for idx, (data, label, tgt_in) in enumerate(data_loader):
            process_bar.update(1)
            data, label = data.to(args.device).float(), label.to(args.device).float()
            optimizer.zero_grad()
            # decoder-based models (Transformer) take the shifted target as a second input
            if args.have_decoder:
                tgt_in = tgt_in.to(args.device).float()
                output = model(data, tgt_in)
            else:
                output = model(data)
            loss = criterion(output, label)
            loss.backward()
            # gradient clipping stabilizes the recurrent/attention training
            torch.nn.utils.clip_grad_norm_(model.parameters(), 0.7)
            optimizer.step()

            step = epoch * batch_len + idx
            writer.add_scalar("train_loss", loss.item(), step)
            writer.add_scalar("train_ppl", math.exp(loss.item()), step)

            if idx % 10 == 0:
                tqdm.write(f'[epoch:{epoch:>3d}] loss:{loss.item():.7f} , ppl:{math.exp(loss.item())} .')
    finally:
        process_bar.close()


def get_model(args):
    """Instantiate the model selected by ``Constant.MODEL_CHOOSE``.

    Side effects: sets ``args.model`` to the chosen model name and, for the
    full transformer, sets ``args.have_decoder = True``.

    Raises:
        ValueError: if the configured model name is not recognized.
    """
    feature_choose = Constant.FEATURES_CHOOSE
    # +1 presumably accounts for the target column added to the feature set — TODO confirm
    n_inputs = len(Constant.FEATURES_MAP[feature_choose]) + 1

    model_name = Constant.MODELS[Constant.MODEL_CHOOSE]
    print(f'you are using {model_name}...')
    args.model = model_name
    if model_name == 'transformer_encoder':
        return TransAm(input_feature_size=n_inputs).to(args.device)
    elif model_name == 'LSTM':
        return LSTMModel(n_input=n_inputs).to(args.device).apply(lstm_init_weights)
    elif model_name == 'transformer':
        args.have_decoder = True
        return Transformer(n_encoder_inputs=n_inputs, n_decoder_inputs=n_inputs).to(args.device)
    else:
        # name the offending value instead of raising a bare ValueError
        raise ValueError(f'unsupported model name: {model_name!r}')


def main(main_args):
    """Full training driver: build model, data loaders, optimizer, then run
    train/validate for ``main_args.epochs`` epochs, checkpointing the best
    validation loss and logging metrics to TensorBoard.
    """
    # build the model (also sets main_args.model / main_args.have_decoder)
    model = get_model(main_args)
    print(main_args.have_decoder)

    print(f'you are using features: {Constant.FEATURES_MAP[Constant.FEATURES_CHOOSE]}'
          f'to predict {Constant.TARGET}...')
    # BUGFIX: the original referenced the global `args` here and below, which
    # only worked when called from __main__; use the parameter consistently.
    train_dataset = TrafficDataset(train=True)
    train_dataset_loader = DataLoader(train_dataset, batch_size=main_args.batch_size, shuffle=main_args.shuffle)
    test_dataset = TrafficDataset(train=False)
    test_dataset_loader = DataLoader(test_dataset, batch_size=1, shuffle=False)

    criterion = nn.MSELoss()
    optimizer = torch.optim.AdamW(model.parameters(), lr=main_args.learn_rate)
    # decay the LR by 5% every epoch
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.95)

    writer = SummaryWriter(main_args.tensorboard)
    # make sure the checkpoint directory exists before the first torch.save
    os.makedirs(main_args.backup, exist_ok=True)

    best = None

    for epoch in range(main_args.epochs):
        # train
        train(main_args, model, train_dataset_loader, criterion, optimizer, epoch, writer)
        # validation
        truth, predictions, valid_loss, valid_ppl = test(main_args, model, test_dataset_loader, criterion)

        # first epoch always checkpoints (best is None), matching the original logic
        if best is None or valid_loss <= best[0]:
            best = [valid_loss, valid_ppl]
            tqdm.write('model saving...')
            model_save_path = os.path.join(main_args.backup, f'{main_args.model}_best_{valid_loss:.5f}.pth')
            torch.save(model.state_dict(), model_save_path)

        writer.add_scalar("valid_loss", valid_loss, epoch)
        writer.add_scalar("valid_ppl", valid_ppl, epoch)
        PlotUtils.plot(truth, predictions, epoch)
        # scheduler
        scheduler.step()


def str2bool(value):
    """Parse a command-line boolean.

    argparse's ``type=bool`` treats ANY non-empty string as True (so
    ``--shuffle False`` silently enabled shuffling); this parses the common
    spellings explicitly and rejects anything else.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ('true', '1', 'yes', 'y'):
        return True
    if lowered in ('false', '0', 'no', 'n'):
        return False
    raise argparse.ArgumentTypeError(f'boolean value expected, got {value!r}')


if __name__ == "__main__":
    now_time = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
    parser = argparse.ArgumentParser()
    parser.add_argument("--epochs", type=int, default=100)
    parser.add_argument("--learn_rate", "-lr", type=float, default=0.005)
    parser.add_argument("--device", type=str)
    # BUGFIX: type=bool made "--shuffle False" parse as True; use str2bool
    parser.add_argument("--shuffle", type=str2bool, default=True)
    parser.add_argument("--batch_size", type=int, default=10)
    parser.add_argument("--have_decoder", type=str2bool, default=False)
    parser.add_argument("--model", type=str)
    parser.add_argument("--backup", type=str, default='backup/',
                        help="the directory to save model parameter")
    parser.add_argument("--tensorboard", type=str, default=os.path.join("tensorboard", now_time),
                        help="the directory to save tensorboard data")

    args = parser.parse_args()
    # the --device flag is overridden here; CUDA is used whenever available
    args.device = "cuda" if torch.cuda.is_available() else "cpu"
    print(f"You are using {args.device}......")

    # BUGFIX: the open() below crashed on a fresh checkout when the
    # tensorboard/ directory did not exist yet
    os.makedirs("tensorboard", exist_ok=True)
    with open("tensorboard/tensorboard.txt", 'w') as handle:
        handle.write(f'tensorboard --logdir={args.tensorboard}')

    main(args)
