from config import Constant
from rnn_model import *
from tqdm import tqdm
from dataset import generate_dataset, MyDataset
from torch.utils.data import DataLoader
from utils import PlotUtils
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from torch.utils.tensorboard import SummaryWriter


def get_model(args):
    """Instantiate the model selected by ``Constant.MODEL_CHOOSE``.

    Records the chosen model name on ``args.model`` and, for the full
    encoder/decoder transformer, sets ``args.have_decoder = True`` so the
    train/test loops feed the decoder input tensor.

    Args:
        args: argparse Namespace with at least ``device``; mutated in place.

    Returns:
        The instantiated model, moved to ``args.device``.

    Raises:
        ValueError: if ``Constant.MODEL_CHOOSE`` selects an unknown name.
    """
    model_name = Constant.MODELS[Constant.MODEL_CHOOSE]
    print(f'you are using {model_name}...')
    args.model = model_name
    if model_name == 'transformer_encoder':
        return TransAm(n_inputs=Constant.N_INPUT).to(args.device)
    elif model_name == 'LSTM':
        return LSTMModel(n_input=Constant.N_INPUT).to(args.device).apply(lstm_init_weights)
    elif model_name == 'transformer':
        args.have_decoder = True
        return Transformer(n_encoder_inputs=Constant.N_INPUT, n_decoder_inputs=Constant.N_INPUT).to(args.device)
    else:
        # A bare ``raise ValueError`` gave no clue which name was rejected.
        raise ValueError(f'unknown model name: {model_name!r}')


def train(args, model, data_loader, criterion, optimizer, epoch, writer):
    """Run one training epoch over ``data_loader``.

    Args:
        args: Namespace with ``device`` and ``have_decoder``.
        model: the network being trained (put into train mode here).
        data_loader: yields ``(data, label, tgt_in)`` batches; ``tgt_in`` is
            only consumed when ``args.have_decoder`` is set.
        criterion: loss function applied to ``(output, label)``.
        optimizer: optimizer stepped once per batch.
        epoch: current epoch index, used for logging only.
        writer: TensorBoard ``SummaryWriter`` receiving per-batch train loss.
    """
    model.train()
    batch_len = len(data_loader)

    process_bar = tqdm(total=batch_len)
    # try/finally so the bar is always closed; the original leaked it,
    # which garbles subsequent terminal output and leaks a file handle.
    try:
        for idx, (data, label, tgt_in) in enumerate(data_loader):
            process_bar.update(1)
            data, label = data.to(args.device).float(), label.to(args.device).float()
            optimizer.zero_grad()
            if args.have_decoder:
                tgt_in = tgt_in.to(args.device).float()
                output = model(data, tgt_in)
            else:
                output = model(data)
            loss = criterion(output, label)
            loss.backward()
            # Clip to keep RNN/transformer gradients from exploding.
            torch.nn.utils.clip_grad_norm_(model.parameters(), 0.7)
            optimizer.step()

            writer.add_scalar("train_loss", loss.item(), epoch * batch_len + idx)

            if idx % 10 == 0:
                tqdm.write(f'[epoch:{epoch:>3d}] loss:{loss.item():.7f} .')
    finally:
        process_bar.close()


def test(args, model, data_loader, criterion):
    """Evaluate ``model`` on ``data_loader`` without gradient tracking.

    Returns:
        tuple ``(truth, predictions, loss)`` — flattened CPU tensors of the
        selected label/output time step per batch, and the mean batch loss.
    """
    model.eval()
    loss_sum = 0.
    # Seed with empty tensors so torch.cat is well-defined even for an
    # empty loader, matching the original accumulator initialisation.
    pred_chunks = [torch.Tensor(0)]
    truth_chunks = [torch.Tensor(0)]
    with torch.no_grad():
        for data, label, tgt_in in data_loader:
            data = data.to(args.device).float()
            label = label.to(args.device).float()
            if args.have_decoder:
                output = model(data, tgt_in.to(args.device).float())
            else:
                output = model(data)
            loss_sum += criterion(output, label).item()
            # NOTE(review): ``-Constant.OUTPUT_WINDOW`` indexes a single time
            # step (not a ``-OUTPUT_WINDOW:`` slice) — confirm this is intended.
            pred_chunks.append(output[:, -Constant.OUTPUT_WINDOW].view(-1).cpu())
            truth_chunks.append(label[:, -Constant.OUTPUT_WINDOW].view(-1).cpu())

    loss = loss_sum / len(data_loader)
    tqdm.write(f'valid loss : {loss:.5f}')
    tqdm.write("-" * 100)
    return torch.cat(truth_chunks, 0), torch.cat(pred_chunks, 0), loss


def main(args):
    """Full training run: build model and data, then train/validate per epoch.

    Args:
        args: Namespace with ``device``, ``epochs``, ``learn_rate``,
            ``batch_size``, ``shuffle`` and ``have_decoder``.
    """
    # Use the configured factory instead of a hard-coded LSTM (a debug
    # leftover): otherwise Constant.MODEL_CHOOSE and the have_decoder flag
    # set for the full transformer would never take effect.
    model = get_model(args)

    train_ds, test_ds, min_max = generate_dataset()
    train_dataset = MyDataset(train_ds)
    train_dataset_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=args.shuffle)
    test_dataset = MyDataset(test_ds)
    # batch_size=1, no shuffle: keep validation deterministic and ordered.
    test_dataset_loader = DataLoader(test_dataset, batch_size=1, shuffle=False)

    criterion = nn.MSELoss()
    optimizer = torch.optim.AdamW(model.parameters(), lr=args.learn_rate)
    # Exponential-style decay: lr shrinks by 5% every epoch.
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.95)

    writer = SummaryWriter('tensorboard')

    for epoch in range(args.epochs):
        # train
        train(args, model, train_dataset_loader, criterion, optimizer, epoch, writer)
        # validation
        truth, predictions, valid_loss = test(args, model, test_dataset_loader, criterion)

        # Undo the MinMax scaling so plots/printouts are in original units.
        truth = min_max.inverse_transform(truth.reshape((-1, 1)))
        predictions = min_max.inverse_transform(predictions.reshape((-1, 1)))
        PlotUtils.plot(truth, predictions, epoch)
        print('truth:', ",".join(str(i) for i in truth.flatten()))
        print('prediction:', ",".join(str(i) for i in predictions.flatten()))
        # scheduler
        scheduler.step()

        writer.add_scalar("valid_loss", valid_loss, epoch)


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--epochs", type=int, default=100)
    parser.add_argument("--learn_rate", "-lr", type=float, default=0.005)
    parser.add_argument("--device", type=str, default=None)
    # ``type=bool`` is broken with argparse: bool("False") is True, so ANY
    # value on the command line enabled the option. Use proper flags instead
    # (absent -> False, present -> True, same defaults as before).
    parser.add_argument("--shuffle", action="store_true")
    parser.add_argument("--batch_size", type=int, default=10)
    parser.add_argument("--have_decoder", action="store_true")
    parser.add_argument("--model", type=str)

    args = parser.parse_args()
    # Only auto-detect when the user did not pick a device explicitly;
    # the original unconditionally overwrote --device.
    if args.device is None:
        args.device = "cuda" if torch.cuda.is_available() else "cpu"
    print(f"You are using {args.device}......")

    main(args)
