import argparse
import os
import pickle
import time

import pandas as pd
import torch
import torch.nn as nn
from dataset import Triplet_dataset_with_mine_EC, mine_hard_negative
from model import LayerNormNet
from utils import (ensure_dirs, get_dist_map, get_ec_id_dict, seed_everything)

from utils.distance_map import *
from utils.evaluate import *
from utils.utils import *


def parse():
    """Parse command-line arguments for triplet training.

    Returns:
        argparse.Namespace with learning rate, epoch count, device,
        model/data paths, network dimensions, and mining settings.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-l', '--learning_rate', type=float, default=5e-4)
    parser.add_argument('-e', '--epoch', type=int, default=2000)
    parser.add_argument('--device', type=str, default='cuda:0')
    parser.add_argument('-n', '--model_name', type=str, default='split100_triplet')
    parser.add_argument('-m', '--model_dir', type=str, default='/state/partition/wzzheng/clean/model')

    parser.add_argument('--data_dir', type=str, default='/state/partition/wzzheng/clean/data/train_valid_split/split100')
    parser.add_argument('-t', '--training_data', type=str, default='split100_train_split_0')

    parser.add_argument('-d', '--hidden_dim', type=int, default=512)
    parser.add_argument('-o', '--out_dim', type=int, default=128)
    parser.add_argument('--batch_size', type=int, default=1000)
    parser.add_argument('--adaptive_rate', type=int, default=100)
    # BUG FIX: the original used `type=bool`, which makes ANY non-empty
    # value truthy (`--verbose False` parsed as True). A store_true flag
    # gives the intended on/off switch; the default (False) is unchanged.
    parser.add_argument('--verbose', action='store_true', default=False)
    args = parser.parse_args()
    return args

def get_train_dataloader(dist_map, id_ec, ec_id, args):
    """Build a shuffled DataLoader of (anchor, positive, negative) triplets.

    Hard negatives are mined from ``dist_map`` (30 nearest candidate ECs
    per anchor EC) before the triplet dataset is constructed.
    """
    hard_negatives = mine_hard_negative(dist_map, 30)
    triplet_dataset = Triplet_dataset_with_mine_EC(
        id_ec, ec_id, hard_negatives,
        data_dir=args.data_dir, training_data=args.training_data)
    return torch.utils.data.DataLoader(
        triplet_dataset, batch_size=args.batch_size, shuffle=True)


def train(model, args, epoch, train_loader,
          optimizer, device, dtype, criterion):
    """Run one epoch of triplet training and return the mean batch loss.

    Args:
        model: network mapping an embedding batch to the projection space.
        args: parsed CLI args; only ``verbose`` and ``learning_rate`` are read.
        epoch: 1-based epoch index (used for logging only).
        train_loader: iterable yielding (anchor, positive, negative) batches.
        optimizer: optimizer over ``model``'s parameters.
        device: target device for the input tensors.
        dtype: target dtype for the input tensors.
        criterion: triplet loss over (anchor, positive, negative) embeddings.

    Returns:
        Mean loss per batch over the epoch, or 0.0 for an empty loader.
    """
    model.train()
    total_loss = 0.
    n_batches = 0
    start_time = time.time()

    for batch, data in enumerate(train_loader):
        optimizer.zero_grad()
        anchor, positive, negative = data
        anchor_out = model(anchor.to(device=device, dtype=dtype))
        positive_out = model(positive.to(device=device, dtype=dtype))
        negative_out = model(negative.to(device=device, dtype=dtype))

        loss = criterion(anchor_out, positive_out, negative_out)
        loss.backward()
        optimizer.step()

        total_loss += loss.item()
        n_batches += 1
        if args.verbose:
            lr = args.learning_rate
            ms_per_batch = (time.time() - start_time) * 1000
            # NOTE: cumulative epoch loss so far, not the single-batch loss
            # (kept as-is for log compatibility with earlier runs).
            cur_loss = total_loss
            print(f'| epoch {epoch:3d} | {batch:5d}/{len(train_loader):5d} batches | '
                  f'lr {lr:02.4f} | ms/batch {ms_per_batch:6.4f} | '
                  f'loss {cur_loss:5.2f}')
            start_time = time.time()
    # BUG FIX: with an empty loader the original raised NameError on
    # `batch` (never bound) in `total_loss/(batch + 1)`.
    if n_batches == 0:
        return 0.0
    # record running average training loss
    return total_loss / n_batches


def main():
    """Train one model per train/valid split, checkpoint the best late-epoch
    loss of each split, then keep only the overall-best checkpoint on disk.
    """
    seed_everything()
    args = parse()
    ensure_dirs(args.model_dir)
    torch.backends.cudnn.benchmark = True

    #======================== override args ====================#
    use_cuda = torch.cuda.is_available()
    device = torch.device(args.device if use_cuda else "cpu")
    dtype = torch.float32
    lr, epochs = args.learning_rate, args.epoch
    model_name = args.model_name
    print('==> device used:', device, '| dtype used: ', dtype, "\n==> args:", args)

    training_datasets = ['split100_train_split_0', 'split100_train_split_1', 'split100_train_split_2', 'split100_train_split_3', 'split100_train_split_4']
    global_best_model_loss = float('inf')
    global_best_model_path = ''
    global_best_model_paths = []

    for training_data in training_datasets:
        args.training_data = training_data
        print(f'==> Training on {args.training_data}')

        #======================== get data  ===================#
        id_ec, ec_id_dict = get_ec_id_dict(os.path.join(args.data_dir, args.training_data + '.csv'))
        ec_id = {key: list(ec_id_dict[key]) for key in ec_id_dict.keys()}

        #======================== ESM embedding  ===================#
        # loading ESM embedding for dist map. Context managers close the
        # pickle files promptly (the original `pickle.load(open(...))`
        # leaked two file handles per split).
        emb_path = os.path.join(args.data_dir, 'distance_map', args.training_data + '_esm_emb.pkl')
        with open(emb_path, 'rb') as f:
            esm_emb = pickle.load(f).to(device=device, dtype=dtype)
        map_path = os.path.join(args.data_dir, 'distance_map', args.training_data + '_esm.pkl')
        with open(map_path, 'rb') as f:
            dist_map = pickle.load(f)

        #======================== initialize model =================#
        model = LayerNormNet(args.hidden_dim, args.out_dim, device, dtype)
        optimizer = torch.optim.Adam(model.parameters(), lr=lr, betas=(0.9, 0.999))
        criterion = nn.TripletMarginLoss(margin=1, reduction='mean')
        best_loss = float('inf')
        train_loader = get_train_dataloader(dist_map, id_ec, ec_id, args)
        print("The number of unique EC numbers: ", len(dist_map.keys()))

        #======================== training =======-=================#
        print('==> Start training')
        best_model_path = os.path.join(args.model_dir, model_name + '_' + args.training_data + '.pth')
        for epoch in tqdm(range(1, epochs + 1)):
            # Every `adaptive_rate` epochs: reset Adam's moment estimates,
            # re-sample the distance map with the current model, and re-mine
            # hard negatives.
            # NOTE(review): the original also tested `epoch != epochs + 1`,
            # which is always true for epoch in [1, epochs]; dropped as a
            # no-op (the intent was presumably `epoch != epochs`).
            if epoch % args.adaptive_rate == 0:
                optimizer = torch.optim.Adam(
                    model.parameters(), lr=lr, betas=(0.9, 0.999))
                # sample new distance map
                dist_map = get_dist_map(
                    ec_id_dict, esm_emb, device, dtype, model=model)
                train_loader = get_train_dataloader(dist_map, id_ec, ec_id, args)

            train_loss = train(model, args, epoch, train_loader,
                            optimizer, device, dtype, criterion)

            # save best model in this training data; only the last 20% of
            # epochs are considered, after mining has had time to stabilize
            if (train_loss < best_loss and epoch > 0.8*epochs):
                print(f'==> Saving best model of {args.training_data} in {best_model_path} with loss {train_loss}')
                best_loss = train_loss
                torch.save(model.state_dict(), best_model_path)

        global_best_model_paths.append(best_model_path)

        if (best_loss < global_best_model_loss):
            print(f'==> The best model in {args.training_data} with loss {best_loss} is better than global best model')
            global_best_model_loss = best_loss
            global_best_model_path = best_model_path

    # delete per-split checkpoints that lost to the global best
    for path in global_best_model_paths:
        if path != global_best_model_path and os.path.exists(path):
            print(f'==> Removing temporary model in path :{path}')
            os.remove(path)

    print(f'The best model is {global_best_model_path} with loss {global_best_model_loss}.')


# Script entry point: run the full multi-split training pipeline.
if __name__ == '__main__':
    main()
