
import pprint
import random
import os
import datetime
import argparse

import numpy as np
import torch
from torch.utils.data import DataLoader
from torch.nn.functional import cross_entropy
import torch.distributed as dist
from transformers import AutoTokenizer
import wandb

from trainer import Trainer
from utils import load_config, set_seed, print_measures, compute_measures, sample_data_statistic
from selector import target_words, data_selection, collator_selection, model_selection, forward_selection


# os.environ['CUDA_VISIBLE_DEVICES'] = "1"
# model_name_or_path = "../huggingface/roberta-base"

# start_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")

def run(use_wandb = True):
    """Train and evaluate a (prompt-tuned) RoBERTa classifier over one or more seeds.

    Loads the per-dataset INI config, builds train/val DataLoaders (with
    DistributedDataParallel when ``train_by_dist`` is set), trains once per
    seed, evaluates the last-epoch model on the validation split ("val last
    epoch as test"), and appends per-seed results to ``fewshot_res.csv`` and
    seed-averaged results to ``fewshot_merge.csv``.

    Args:
        use_wandb: when True, initialise a wandb run; the wandb config (which
            may be overridden by a sweep) then replaces ``record_config``.
    """
    start_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")

    # Baseline hyper-parameters; values from config/<dataset>.ini (and, when
    # enabled, wandb sweep parameters) are merged on top of these defaults.
    record_config = {
        "fp16": True,
        "train_by_dist": True,
        "task": "NLI",
        "backbone": "roberta-base",
        "eval_strategy": "epoch",
        "mode": "prompt-tune",
        "dataset": "glue-rte",
        "epochs": 6,
        "lr": 4e-5,
        "fine_tune_all": True,
        "mlm_pretrained": True,
        "compute_template": False,
        "alpha": 0,
        "beta": 0,
        "add_xavg": False,
    }

    args = None  # only populated in the distributed branch below
    if record_config['train_by_dist']:
        # torch.distributed.launch passes --local_rank to each worker process.
        parser = argparse.ArgumentParser()
        parser.add_argument('--local_rank', default=-1, type=int,
                            help='node rank for distributed training')
        args = parser.parse_args()

        dist.init_process_group(backend='nccl')
        torch.cuda.set_device(args.local_rank)

    mode = record_config['mode']
    dataset = record_config['dataset']

    # Dataset-specific settings (seed(s), batch_size, nums_label, ...) come
    # from the INI file and override the defaults above.
    config = load_config(f"config/{dataset}.ini")['config']
    record_config.update(config)

    if use_wandb:
        wandb.init(config=record_config, project="unbias4glue")
        pprint.pprint(wandb.config)
        record_config = wandb.config

    # 'seed' may be a single int or a list of ints (multi-seed averaging).
    if isinstance(record_config['seed'], int):
        seeds = [record_config['seed']]
    else:
        seeds = record_config['seed']

    positive_words = target_words[record_config['task']]['positive_words']
    negative_words = target_words[record_config['task']]['negative_words']

    tokenizer = AutoTokenizer.from_pretrained(record_config['backbone'])

    # Index 1 skips the BOS token the tokenizer prepends.
    mask_token = tokenizer(tokenizer.mask_token)[
        'input_ids'][1]  # type: ignore

    # ignore the first and last token (BOS/EOS added by the tokenizer)
    pos_tokens = tokenizer(" ".join(positive_words))[
        'input_ids'][1:-1]  # type: ignore
    neg_tokens = tokenizer(" ".join(negative_words))[
        'input_ids'][1:-1]  # type: ignore

    tokenized_collator = collator_selection(
        mode=mode, tokenizer=tokenizer, task=record_config['task'], use_learnable_token=record_config['use_learnable_token'])

    train_data, val_data = data_selection(
        data_path=None, dataset_name=dataset, domain=record_config['domain'] if dataset == "amazon" else None)

    # BUGFIX: DistributedSampler needs an initialised process group, so it is
    # only valid when train_by_dist is set. Otherwise fall back to plain
    # DataLoader shuffling (the dist samplers shuffled both splits too).
    if record_config['train_by_dist']:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_data, shuffle=True)
        val_sampler = torch.utils.data.distributed.DistributedSampler(val_data, shuffle=True)
    else:
        train_sampler = None
        val_sampler = None

    # DataLoader forbids shuffle=True together with an explicit sampler, so
    # shuffle is enabled only in the non-distributed fallback.
    train_iter = DataLoader(dataset=train_data,
                            batch_size=record_config["batch_size"],
                            collate_fn=tokenized_collator,
                            sampler=train_sampler,
                            shuffle=train_sampler is None)

    val_iter = DataLoader(dataset=val_data,
                          batch_size=record_config["batch_size"],
                          collate_fn=tokenized_collator,
                          sampler=val_sampler,
                          shuffle=val_sampler is None)

    # Per-seed test results, averaged (and std-ed) after the loop.
    avg_loss = []
    avg_metrics = {"accuracy": [],
                   "bi_precision": [], "bi_recall": [], "bi_f1": [],
                   "micro_precision": [], "micro_recall": [], "micro_f1": [],
                   "macro_precision": [], "macro_recall": [], "macro_f1": [],
                   "weighted_precision": [], "weighted_recall": [], "weighted_f1": [],
                   "auc": []}
    for i, seed in enumerate(seeds):
        set_seed(seed)
        print(f'>> seed {seed}')
        print('--------------------------------')

        # BUGFIX: is_available is a function; the original referenced the
        # attribute, which is always truthy.
        if record_config['cuda'] and torch.cuda.is_available():
            torch.cuda.empty_cache()

        model = model_selection(mode,
                                config=record_config,
                                nums_label=record_config['nums_label'],
                                hidden_size=record_config['hidden_size'],
                                dropout=record_config['dropout'],
                                vocab_size=tokenizer.vocab_size,
                                special_token_ids={
                                    'mask_token_id': mask_token,
                                    'positive_token_ids': pos_tokens,
                                    'negative_token_ids': neg_tokens,
                                })
        if record_config['train_by_dist']:
            model = torch.nn.parallel.DistributedDataParallel(model.cuda(), device_ids=[args.local_rank])

        forward_calculator = forward_selection(
            mode, record_config, loss_fn=cross_entropy, metrics_fn=compute_measures)

        # NOTE: the keyword is spelled 'foward_calculator' in Trainer's
        # signature; keep it until Trainer itself is fixed.
        trainer = Trainer(config=record_config,
                          model=model,
                          foward_calculator=forward_calculator,
                          trainset_size=len(train_data),
                          batch_size=record_config['batch_size'],
                          warmup_ratio=record_config['warmup_ratio'],
                          report_to_wandb=use_wandb)

        # The most time-consuming operation --------------->
        _, last_model = trainer.train(train_iter=train_iter,
                                      val_iter=val_iter,
                                      model=model)
        # <---------------

        # torch.save(best_model, f"ckpt/{dataset}-{mode}-fold{fold}.pt")  # type: ignore
        # Reload the last-epoch weights and score them on the val split.
        model.load_state_dict(last_model)  # type: ignore
        test_loss, test_metrics = trainer.evaluate(   # val last epoch as test
            model, val_iter)  # type: ignore

        print("------------------------------------------")
        print("-Test: ")
        print_measures(test_loss, test_metrics)

        # save results for each seed
        with open("fewshot_res.csv", "a+") as f:
            save_str = ",".join([str(x) for x in test_metrics.values()])
            f.write(
                f"{start_time},roberta-{mode},{dataset},{test_loss}," + save_str + "\n")

        for k in avg_metrics.keys():
            avg_metrics[k].append(test_metrics[k])
        # assumes trainer.evaluate returns the loss as a torch tensor — .cpu()
        # moves it off the GPU before np.mean below.
        avg_loss.append(test_loss.cpu())

        # Drop references so the next seed starts from a clean GPU.
        # BUGFIX: the original cleared a never-assigned 'best_model' name while
        # keeping the actual checkpoint ('last_model') alive.
        model, trainer, last_model = None, None, None  # type: ignore

    # Sample standard deviation (ddof=1) across seeds, then the means.
    std_metrics = {k: np.std(v, ddof=1) for k,   # type: ignore
                   v in avg_metrics.items()}
    avg_metrics = {k: np.mean(v) for k,   # type: ignore
                   v in avg_metrics.items()}
    avg_loss = np.mean(avg_loss)

    if use_wandb:
        wandb.log({"start_time": start_time,
                   "test_loss": avg_loss,
                   "test_accuracy": avg_metrics['accuracy'],
                   "test_macro_f1": avg_metrics['macro_f1'],
                   "test_micro_f1": avg_metrics['micro_f1'],
                   "test_acc_std": std_metrics['accuracy'],
                   "test_maf1_std": std_metrics['macro_f1'],
                   "test_mif1_std": std_metrics['micro_f1'],
                   })
        wandb.run.summary["test_macro_f1"] = avg_metrics['macro_f1']

    print("==========================================")
    print("-Seeds AVG: ")
    print_measures(avg_loss, avg_metrics)
    with open("fewshot_merge.csv", 'a+') as f:
        save_str = ",".join([str(x) for x in avg_metrics.values()])
        f.write(
            f"{start_time},roberta-{mode},{dataset},{avg_loss}," + save_str + "\n")


if __name__ == "__main__":
    # sweep_configuration = {
    #     "name": "fine-tune-<glue-rte> (roberta-large)",
    #     "metric": {"name": "test_accuracy", "goal": "maximize"},
    #     "method": "grid",
    #     "parameters": {
    #         "epochs": {
    #             "values": [8, 10, 13]
    #         },
    #         "lr": {
    #             "values": [2e-5, 1e-5, 5e-6, 3e-6]
    #         },
    #         "weight_decay": {
    #             "values": [1e-1, 1e-2, 1e-3]
    #         },
    #         # "mlm_pretrained": {
    #         #     "values": [True, False]
    #         # },
    #         # "alpha":{
    #         #     "values": [0.2, 0.6]
    #         # },
    #         # "beta":{
    #         #     "values": [0.5, 0.7]
    #         # },
    #         # "add_xavg":{
    #         #     "values": [True]
    #         # }
    #     }
    # }
    # sweep_id = wandb.sweep(sweep_configuration, project="unbias4glue", entity='zzoay')

    # # # run the sweep
    # wandb.agent(sweep_id, function=run)

    # os.system("shutdown")

    run(use_wandb=False)