import argparse
import os
import time

import torch
import torch.nn as nn
from dataset import Compare_dataset
from model import CNN, LSTM, RNN
from torch.utils.data import DataLoader
from tqdm import tqdm
from utils import ensure_dirs, get_ec_id_dict, seed_everything
from utils.distance_map import *
from utils.evaluate import *
from utils.utils import *


def _str2bool(value):
    """Convert a command-line string to a bool.

    argparse's ``type=bool`` treats ANY non-empty string — including
    ``"False"`` — as True, so an explicit converter is required.

    Raises:
        argparse.ArgumentTypeError: if *value* is not a recognized boolean.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ('true', '1', 'yes', 'y'):
        return True
    if lowered in ('false', '0', 'no', 'n'):
        return False
    raise argparse.ArgumentTypeError(f'invalid boolean value: {value!r}')


def parse():
    """Parse command-line options for the training run.

    Returns:
        argparse.Namespace with optimization (learning_rate, epoch,
        batch_size), model (model_name, hidden_dim, out_dim), and data
        (data_dir, training_data) settings.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-l', '--learning_rate', type=float, default=5e-4)
    parser.add_argument('-e', '--epoch', type=int, default=1000)
    parser.add_argument('--device', type=str, default='cuda:0')
    parser.add_argument('-n', '--model_name', type=str, default='cnn')
    parser.add_argument('-m', '--model_dir', type=str, default='/state/partition/wzzheng/clean/model')

    parser.add_argument('--data_dir', type=str, default='/state/partition/wzzheng/clean/data/train_valid_split/split100')
    parser.add_argument('-t', '--training_data', type=str, default='split100_train_split_0')

    parser.add_argument('-d', '--hidden_dim', type=int, default=512)
    parser.add_argument('-o', '--out_dim', type=int, default=128)
    parser.add_argument('--batch_size', type=int, default=1000)
    parser.add_argument('--adaptive_rate', type=int, default=100)
    # BUG FIX: was ``type=bool`` which parsed "--verbose False" as True.
    # nargs='?'/const=True keeps "--verbose <value>" working and also
    # allows a bare "--verbose" flag.
    parser.add_argument('--verbose', type=_str2bool, nargs='?',
                        const=True, default=False)
    args = parser.parse_args()
    return args


def train(model, args, epoch, train_loader, optimizer, device, dtype, criterion):
    """Run one training epoch and return the mean per-batch loss.

    Args:
        model: network to train; updated in place by ``optimizer``.
        args: namespace providing ``verbose`` and ``learning_rate``.
        epoch: current epoch number (used only in log output).
        train_loader: iterable yielding ``(data, labels)`` batches.
        optimizer: optimizer over ``model``'s parameters.
        device: target device for the batch tensors.
        dtype: target dtype for the batch tensors.
        criterion: loss function applied as ``criterion(output, labels)``.

    Returns:
        float: total loss divided by the number of batches processed,
        or 0.0 when the loader is empty.
    """
    model.train()
    total_loss = 0.
    num_batches = 0
    start_time = time.time()

    for batch, (data, labels) in enumerate(train_loader):
        optimizer.zero_grad()
        data = data.to(device=device, dtype=dtype)
        labels = labels.to(device=device, dtype=dtype)

        output = model(data)
        loss = criterion(output, labels)
        loss.backward()
        optimizer.step()

        batch_loss = loss.item()
        total_loss += batch_loss
        num_batches += 1
        if args.verbose:
            lr = args.learning_rate
            ms_per_batch = (time.time() - start_time) * 1000
            # BUG FIX: previously printed the accumulated ``total_loss``
            # labeled as "loss", which grows monotonically across the
            # epoch; report the current batch's loss instead.
            print(f'| epoch {epoch:3d} | {batch:5d}/{len(train_loader):5d} batches | '
                  f'lr {lr:02.4f} | ms/batch {ms_per_batch:6.4f} | '
                  f'loss {batch_loss:5.2f}')
            start_time = time.time()
    # BUG FIX: with an empty loader the loop variable ``batch`` was
    # unbound and ``total_loss/(batch + 1)`` raised NameError.
    return total_loss / num_batches if num_batches else 0.0


def main():
    """Entry point: train one model per training split and keep the best.

    For every dataset in ``training_datasets`` a fresh model is trained
    for ``args.epoch`` epochs.  The checkpoint with the lowest training
    loss observed in the final 20% of epochs is saved per split; after
    all splits finish, every checkpoint except the globally best one is
    deleted.
    """
    seed_everything()
    args = parse()
    ensure_dirs(args.model_dir)
    # Fixed input sizes per batch -> cudnn autotuning is a win.
    torch.backends.cudnn.benchmark = True

    #======================== override args ====================#
    use_cuda = torch.cuda.is_available()
    device = torch.device(args.device if use_cuda else "cpu")
    dtype = torch.float32
    lr, epochs = args.learning_rate, args.epoch
    model_name = args.model_name
    print('==> device used:', device, '| dtype used: ', dtype, "\n==> args:", args)

    training_datasets = ['split100_train_split_0']
    global_best_model_loss = float('inf')
    global_best_model_path = ''
    global_best_model_paths = []

    for training_data in training_datasets:
        args.training_data = training_data
        print(f'==> Training on {args.training_data}')

        #======================== get data  ===================#
        # id_ec / ec_id_dict come from the split CSV; exact schema is
        # defined by utils.get_ec_id_dict.
        id_ec, ec_id_dict = get_ec_id_dict(os.path.join(args.data_dir, args.training_data + '.csv'))
        ec_id = {key: list(ec_id_dict[key]) for key in ec_id_dict.keys()}

        #======================== get data loader ===================#
        dataset = Compare_dataset(id_ec, ec_id)
        train_loader = DataLoader(dataset, batch_size=args.batch_size)

        #======================== get model ===================#
        # 1280: input feature dimension; 5242: output dimension
        # (presumably the number of EC classes — confirm against dataset).
        if model_name == 'cnn':
            model = CNN(1280, args.hidden_dim, 5242)
        elif model_name == 'rnn':
            model = RNN(1280, args.hidden_dim, 5242)
        elif model_name == 'lstm':
            model = LSTM(1280, args.hidden_dim, 5242)
        else:
            # BUG FIX: an unknown name previously left ``model`` unbound
            # and crashed with NameError below; fail fast instead.
            raise ValueError(f'unknown model name: {model_name!r} '
                             "(expected 'cnn', 'rnn' or 'lstm')")
        model = model.to(device=device, dtype=dtype)

        #======================== get loss function ===================#
        # NOTE(review): BCELoss requires the model to emit probabilities
        # (e.g. end in sigmoid); the old comment suggested switching to
        # BCEWithLogitsLoss — confirm against the model definitions.
        criterion = nn.BCELoss()

        #======================== get optimizer ===================#
        optimizer = torch.optim.Adam(model.parameters(), lr=lr)

        #======================== train model ===================#
        best_loss = float('inf')
        best_model_path = os.path.join(args.model_dir, model_name + '_' + args.training_data + '.pth')
        pbar = tqdm(range(1, epochs + 1), desc="Epochs")
        for epoch in pbar:
            train_loss = train(model, args, epoch, train_loader, optimizer, device, dtype, criterion)

            # Only checkpoint in the final 20% of epochs (skips noisy
            # early-training minima).
            if (train_loss < best_loss and epoch > 0.8*epochs):
                print(f'==> Saving best model of {args.training_data} in {best_model_path} with loss {train_loss}')
                best_loss = train_loss
                torch.save(model.state_dict(), best_model_path)

            # Surface the running loss on the tqdm progress bar.
            pbar.set_postfix({'loss': train_loss})

        global_best_model_paths.append(best_model_path)

        if (best_loss < global_best_model_loss):
            print(f'==> The best model in {args.training_data} with loss {best_loss} is better than global best model')
            global_best_model_loss = best_loss
            global_best_model_path = best_model_path

    # Keep only the single best checkpoint across all splits.
    for path in global_best_model_paths:
        if path != global_best_model_path and os.path.exists(path):
            print(f'==> Removing temporary model in path :{path}')
            os.remove(path)

    print(f'The best model is {global_best_model_path} with loss {global_best_model_loss}.')


# Run training only when executed as a script, not when imported.
if __name__ == '__main__':
    main()