import argparse
import os
import pickle
import time

import pandas as pd
import torch
import torch.nn as nn
from dataset import Triplet_dataset_with_mine_EC, mine_hard_negative
from model import LayerNormNet
from model.norm import LayerNormNet
from prostT5 import prostT5
from utils import (ensure_dirs, get_dist_map, get_ec_id_dict, seed_everything)

from utils.distance_map import *
from utils.evaluate import *
from utils.utils import *


def parse():
    """Parse command-line arguments for training.

    Returns:
        argparse.Namespace with learning-rate/epoch/model/data/architecture
        settings; ``verbose`` is a boolean flag (present => True).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-l', '--learning_rate', type=float, default=5e-4)
    parser.add_argument('-e', '--epoch', type=int, default=2000)
    parser.add_argument('--device', type=str, default='cuda:0')
    parser.add_argument('-n', '--model_name', type=str, default='split100_triplet')
    parser.add_argument('-m', '--model_dir', type=str, default='/state/partition/wzzheng/clean/model')

    parser.add_argument('--data_dir', type=str, default='/state/partition/wzzheng/clean/data/train_valid_split/split100')
    parser.add_argument('-t', '--training_data', type=str, default='split100_train_split_0')
    parser.add_argument('-v', '--valid_data', type=str, default='split100_test_split_0_curate')

    parser.add_argument('-d', '--hidden_dim', type=int, default=512)
    parser.add_argument('-o', '--out_dim', type=int, default=128)
    parser.add_argument('--batch_size', type=int, default=1000)
    parser.add_argument('--adaptive_rate', type=int, default=100)
    # BUG FIX: `type=bool` made any non-empty value (including "False") truthy;
    # a store_true flag is the correct way to expose a boolean switch.
    parser.add_argument('--verbose', action='store_true', default=False)
    args = parser.parse_args()
    return args

def get_train_dataloader(dist_map, id_ec, ec_id, args):
    """Build a shuffled DataLoader over triplets with freshly mined hard negatives.

    Mines hard negatives (30 candidates per EC) from the current distance map,
    wraps them in the triplet dataset, and returns a shuffled loader using
    ``args.batch_size``.
    """
    hard_negatives = mine_hard_negative(dist_map, 30)
    triplets = Triplet_dataset_with_mine_EC(
        id_ec, ec_id, hard_negatives,
        data_dir=args.data_dir, training_data=args.training_data)
    return torch.utils.data.DataLoader(
        triplets, batch_size=args.batch_size, shuffle=True)

def validate(model, args, dtype, train_data, validate_data, p_value=1e-5, nk_random=20,
                data_dir='/state/partition/wzzheng/clean/data/train_valid_split/split100',
                results_dir='/state/partition/wzzheng/clean/results'):
    """Evaluate the model on a held-out split using the p-value inference rule.

    Embeds the training EC cluster centers and the test sequences with the
    current model, writes per-sequence EC predictions (p-value criterion) to
    ``results_dir``, and scores them against the ground-truth labels.

    Args:
        model: trained projection network (set to eval mode here).
        args: parsed CLI args; only ``args.device`` is used.
        dtype: torch dtype for the embeddings.
        train_data / validate_data: CSV basenames (without extension) in data_dir.
        p_value: significance threshold for calling an EC prediction.
        nk_random: number (in thousands) of random training ids for the
            background distance distribution.
        data_dir / results_dir: input CSV and output prediction locations.

    Returns:
        Tuple (precision, recall, f1, roc_auc, accuracy).
    """
    seed_everything()
    model.eval()

    # load EC-id dictionaries for the train (cluster centers) and test splits
    id_ec_train, ec_id_dict_train = get_ec_id_dict(os.path.join(data_dir, train_data + '.csv'))
    id_ec_test, _ = get_ec_id_dict(os.path.join(data_dir, validate_data + '.csv'))

    # load training EC cluster center embeddings
    emb_train = model(esm_embedding(ec_id_dict_train, args.device, dtype))

    # calculate distance map and p-value for test data
    # BUG FIX: removed leftover `import pdb; pdb.set_trace()` debug breakpoint
    # that halted every validation pass.
    emb_test = model_embedding_test(id_ec_test, model, args.device, dtype)
    print("The embedding sizes for train and test:", emb_train.size(), emb_test.size())
    eval_dist = get_dist_map_test(emb_train, emb_test, ec_id_dict_train, id_ec_test, args.device, dtype)
    eval_df = pd.DataFrame.from_dict(eval_dist)
    # background distances from a weighted random sample of training sequences
    rand_nk_ids, rand_nk_emb_train = random_nk_model(
        id_ec_train, ec_id_dict_train, emb_train, n=nk_random, weighted=True)
    random_nk_dist_map = get_random_nk_dist_map(
        emb_train, rand_nk_emb_train, ec_id_dict_train, rand_nk_ids, args.device, dtype)

    # write p-value choices to file
    out_filename = os.path.join(results_dir, validate_data)
    write_pvalue_choices(eval_df, out_filename, random_nk_dist_map, p_value=p_value)

    # calculate validation metrics
    pred_label = get_pred_labels(out_filename, pred_type='_pvalue')
    pred_probs = get_pred_probs(out_filename, pred_type='_pvalue')
    true_label, all_label = get_true_labels(os.path.join(data_dir, validate_data + '.csv'))
    pre, rec, f1, roc, acc = get_eval_metrics(
        pred_label, pred_probs, true_label, all_label)

    return pre, rec, f1, roc, acc


def train(model, args, epoch, train_loader,
          optimizer, device, dtype, criterion):
    """Run one epoch of triplet-loss training.

    Args:
        model: projection network; set to train mode here.
        args: parsed CLI args; uses ``args.verbose`` and ``args.learning_rate``.
        epoch: current epoch number (logging only).
        train_loader: iterable yielding 6-tuples
            (anchor, positive, negative, _, _, _) of embedding batches.
        optimizer: optimizer over ``model``'s parameters.
        device, dtype: placement/cast target for each batch.
        criterion: triplet margin loss.

    Returns:
        Mean loss per batch over the epoch; 0.0 for an empty loader.
    """
    model.train()
    total_loss = 0.
    num_batches = 0
    start_time = time.time()

    for batch, data in enumerate(train_loader):
        optimizer.zero_grad()
        anchor, positive, negative, _, _, _ = data
        anchor_out = model(anchor.to(device=device, dtype=dtype))
        positive_out = model(positive.to(device=device, dtype=dtype))
        negative_out = model(negative.to(device=device, dtype=dtype))

        loss = criterion(anchor_out, positive_out, negative_out)
        loss.backward()
        optimizer.step()

        total_loss += loss.item()
        num_batches += 1
        if args.verbose:
            lr = args.learning_rate
            ms_per_batch = (time.time() - start_time) * 1000
            # NOTE: this is the cumulative running loss for the epoch so far,
            # not the per-batch loss.
            cur_loss = total_loss
            print(f'| epoch {epoch:3d} | {batch:5d}/{len(train_loader):5d} batches | '
                  f'lr {lr:02.4f} | ms/batch {ms_per_batch:6.4f} | '
                  f'loss {cur_loss:5.2f}')
            start_time = time.time()
    # record running average training loss
    # BUG FIX: the original divided by `batch + 1`, which raised NameError
    # when the loader was empty; an explicit counter avoids that.
    return total_loss / num_batches if num_batches else 0.0


def main():
    """End-to-end training driver.

    Loads EC-labelled data and precomputed ESM embeddings, trains the
    LayerNormNet projection with triplet loss, periodically re-mines hard
    negatives from the model's own distance map, validates each epoch, and
    saves the final weights.
    """
    seed_everything()
    args = parse()
    ensure_dirs(args.model_dir)
    torch.backends.cudnn.benchmark = True

    #======================== Get Data  ===================#
    # id_ec: protein id -> list of EC numbers, e.g. 'C4Z3P0': ['4.1.2.4'];
    # ec_id_dict: EC number -> set of protein ids,
    # e.g. '7.3.2.6': {'Q8U4K3', 'Q93KD4', 'Q9V2C0', ...}
    id_ec, ec_id_dict = get_ec_id_dict(os.path.join(args.data_dir, args.training_data + '.csv'))
    ec_id = {ec: list(ids) for ec, ids in ec_id_dict.items()}

    #======================== override args ====================#
    use_cuda = torch.cuda.is_available()
    device = torch.device(args.device if use_cuda else "cpu")
    dtype = torch.float32
    lr, epochs = args.learning_rate, args.epoch
    model_name = args.model_name
    print('==> device used:', device, '| dtype used: ', dtype, "\n==> args:", args)

    #======================== ESM embedding  ===================#
    # loading ESM embedding for dist map
    # BUG FIX: use context managers so the pickle file handles are closed
    # (the originals were opened and never closed).
    emb_path = os.path.join(args.data_dir, 'distance_map', args.training_data + '_esm_emb.pkl')
    with open(emb_path, 'rb') as f:
        esm_emb = pickle.load(f).to(device=device, dtype=dtype)
    map_path = os.path.join(args.data_dir, 'distance_map', args.training_data + '_esm.pkl')
    with open(map_path, 'rb') as f:
        dist_map = pickle.load(f)

    #======================== initialize model =================#
    model = LayerNormNet(args.hidden_dim, args.out_dim, device, dtype)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr, betas=(0.9, 0.999))
    criterion = nn.TripletMarginLoss(margin=1, reduction='mean')
    best_loss = float('inf')
    train_loader = get_train_dataloader(dist_map, id_ec, ec_id, args)
    print("The number of unique EC numbers: ", len(dist_map.keys()))

    #======================== training =======-=================#
    epoch = 0  # keep the post-loop cleanup well-defined even if epochs == 0
    for epoch in range(1, epochs + 1):
        # NOTE(review): `epoch != epochs + 1` is always true for epoch <= epochs;
        # kept as-is to preserve behavior — `epoch != epochs` may have been intended.
        if epoch % args.adaptive_rate == 0 and epoch != epochs + 1:
            # reset optimizer state before re-mining hard negatives
            optimizer = torch.optim.Adam(
                model.parameters(), lr=lr, betas=(0.9, 0.999))
            # save updated model
            torch.save(model.state_dict(), os.path.join(
                args.model_dir, model_name + '_' + str(epoch) + '.pth'))
            # delete last model checkpoint (guard in case it was cleaned externally)
            if epoch != args.adaptive_rate:
                prev_ckpt = os.path.join(
                    args.model_dir, model_name + '_' + str(epoch - args.adaptive_rate) + '.pth')
                if os.path.isfile(prev_ckpt):
                    os.remove(prev_ckpt)
            # sample a new distance map from the current model's embedding space
            dist_map = get_dist_map(
                ec_id_dict, esm_emb, device, dtype, model=model)
            train_loader = get_train_dataloader(dist_map, id_ec, ec_id, args)
        # -------------------------------------------------------------------- #
        epoch_start_time = time.time()
        train_loss = train(model, args, epoch, train_loader,
                           optimizer, device, dtype, criterion)
        valid_metrics = validate(model, args, dtype, args.training_data, args.valid_data)
        print(f'>>> Precision: {valid_metrics[0]:.3} | Recall: {valid_metrics[1]:.3}'
          f'| F1 Score: {valid_metrics[2]:.3} | ROC AUC: {valid_metrics[3]:.3} '
          f'| Accuracy: {valid_metrics[4]:.3}')
        # only save the current best model near the end of training
        if train_loss < best_loss and epoch > 0.8 * epochs:
            torch.save(model.state_dict(), os.path.join(args.model_dir, model_name + '.pth'))
            best_loss = train_loss
            print(f'Best from epoch : {epoch:3d}; loss: {train_loss:6.4f}')

        elapsed = time.time() - epoch_start_time
        print('-' * 75)
        print(f'| end of epoch {epoch:3d} | time: {elapsed:5.2f}s | '
              f'training loss {train_loss:6.4f}')
        print('-' * 75)
    # remove tmp save weights
    # BUG FIX: guard removals — either file may not exist (the best model is
    # only written during the last 20% of epochs, and the periodic checkpoint
    # only on adaptive_rate multiples), which raised FileNotFoundError.
    for tmp_name in (model_name + '.pth', model_name + '_' + str(epoch) + '.pth'):
        tmp_path = os.path.join(args.model_dir, tmp_name)
        if os.path.isfile(tmp_path):
            os.remove(tmp_path)
    # save final weights
    torch.save(model.state_dict(), os.path.join(args.model_dir, model_name + '_final.pth'))


# Script entry point: run the full training pipeline when executed directly.
if __name__ == '__main__':
    main()
