import sys
sys.path.append("../../")
import gc
import fitlog
from torch.optim.lr_scheduler import LambdaLR
from transformers import AdamW
from transformers import DistilBertConfig
from transformers import DistilBertTokenizer
from transformers import BertConfig
from transformers import BertTokenizer
from datareader import *
from metrics import *
from model import *
import pickle
import time

def Evaluate(
        model: torch.nn.Module,
        validation_evaluator: NLIEvaluator,
        best_acc,
        patience_counter: int,
        model_dir: str,
        domain_name: str
):
    """Run one validation pass and checkpoint the model on improvement.

    Evaluates `model` on the validation set; if accuracy beats `best_acc`,
    saves the weights to `{model_dir}/model_{domain_name}.pth`, logs the
    metrics via fitlog, and resets patience. Otherwise increments patience.

    Returns:
        Tuple of (updated best accuracy, updated patience counter).
    """
    (val_loss, acc, P, R, F1), _ = validation_evaluator.evaluate(model)
    print(f"Validation acc: {acc}")
    if acc <= best_acc:
        # No improvement: spend one unit of patience.
        return best_acc, patience_counter + 1
    # New best: snapshot the weights and record the metrics.
    torch.save(model.state_dict(), f'{model_dir}/model_{domain_name}.pth')
    metrics = {
        'Validation accuracy': acc,
        'Validation Precision': P,
        'Validation Recall': R,
        'Validation F1': F1,
        'Validation loss': val_loss}
    fitlog.add_best_metric(metrics)
    print(metrics)
    return acc, 0

def train(
        model: torch.nn.Module,
        train_loader: DataLoader,
        optimizer: torch.optim.Optimizer,
        scheduler: LambdaLR,
        validation_evaluator: NLIEvaluator,
        n_epochs: int,
        log_interval: int = 1,
        valid_interval: int = 100,
        patience: int = 10,
        model_dir: str = "wandb_local",
        gradient_accumulation: int = 1,
        domain_name: str = ''
):
    """Training loop with periodic validation and early stopping.

    Args:
        model: network to optimise; must accept the SNLI batch layout below
            and return an object exposing ``.loss`` and ``.logits``.
        train_loader: yields batches laid out as
            (input_ids, masks, seg_ids, labels, domains).
        optimizer: stepped once every `gradient_accumulation` batches.
        scheduler: optional LR scheduler stepped alongside the optimizer.
        validation_evaluator: produces (loss, acc, P, R, F1) on the dev set.
        n_epochs: maximum number of passes over `train_loader`.
        log_interval: log the training loss every this many batches.
        valid_interval: run validation every this many batches.
        patience: stop after this many validations without improvement.
        model_dir: directory where `Evaluate` saves the best checkpoint.
        gradient_accumulation: micro-batches to accumulate before stepping.
        domain_name: suffix `Evaluate` uses in the checkpoint file name.
    """
    best_acc = 0.0
    patience_counter = 0
    total = len(train_loader)
    # BUG FIX: gradients must only be cleared at accumulation boundaries.
    # Previously zero_grad() ran at the top of EVERY iteration, wiping the
    # gradients accumulated so far and making gradient_accumulation > 1
    # equivalent to training on just the last micro-batch (scaled down).
    optimizer.zero_grad()
    for epoch_counter in range(n_epochs):
        with tqdm(total=total, desc="Training") as pbar:
            for idx, batch in enumerate(train_loader):
                input_ids, masks, seg_ids, labels, domains = batch[0], batch[1], batch[2], \
                                                                batch[3], batch[4]
                rst = model(input_ids, token_ids=seg_ids, attention_mask=masks,
                            domains=domains, labels=labels)
                # Scale so the accumulated gradient averages over the window.
                loss = rst.loss / gradient_accumulation
                loss.backward()
                pbar.update(1)
                if (idx + 1) % gradient_accumulation == 0:
                    optimizer.step()
                    optimizer.zero_grad()
                    if scheduler is not None:
                        scheduler.step()
                    gc.collect()
                # NOTE: a trailing partial accumulation window at epoch end is
                # carried over into the next epoch rather than stepped/dropped.
                if (idx + 1) % log_interval == 0:
                    # BUG FIX: use a step that grows across epochs; bare `idx`
                    # restarted at 0 each epoch, overwriting earlier points.
                    fitlog.add_metric({
                        "Loss": loss.item()
                    }, step=epoch_counter * total + idx)
                    print({
                        "Loss": loss.item()
                    })
                if (idx + 1) % valid_interval == 0:
                    best_acc, patience_counter = Evaluate(model,
                                                          validation_evaluator,
                                                          best_acc,
                                                          patience_counter,
                                                          model_dir,
                                                          domain_name)
                    if patience_counter > patience:
                        # Early stop: `patience` validations with no improvement.
                        return
                    gc.collect()


if __name__ == "__main__":
    import shutil  # local import: only needed by this entry point

    # Restore the experiment arguments serialized by the launcher script.
    with open("../../args.pkl", 'rb') as fr:
        args = pickle.load(fr)

    # BUG FIX: str.rstrip(".py") removes any trailing '.', 'p' or 'y'
    # CHARACTERS (e.g. "copy.py" -> "co"), not the ".py" suffix.
    script_path = str(__file__)
    args.model_dir = script_path[:-3] if script_path.endswith(".py") else script_path

    # Set all the seeds so runs are reproducible.
    seed = args.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    # See if CUDA available
    device = torch.device("cpu")
    if args.n_gpu > 0 and torch.cuda.is_available():
        print("Training on GPU")
        device = torch.device("cuda:0")

    # Model configuration (this section was previously duplicated verbatim).
    batch_size = args.batch_size
    lr = args.lr
    weight_decay = args.weight_decay
    n_epochs = args.n_epochs
    args.full_bert = True
    args.bertPath = "../../../bert_en/"
    print("====>", args.full_bert)

    # Pick the architecture; a local checkpoint path, when set, takes
    # precedence over the hub model name.
    bert_model = 'bert-base-uncased' if args.full_bert else 'distilbert-base-uncased'
    if args.full_bert:
        bert_config = BertConfig.from_pretrained(bert_model, num_labels=2) if args.bertPath is None else \
                        BertConfig.from_pretrained(args.bertPath, num_labels=2)
        tokenizer = BertTokenizer.from_pretrained(bert_model) if args.bertPath is None else \
                        BertTokenizer.from_pretrained(args.bertPath)
    else:
        bert_config = DistilBertConfig.from_pretrained(bert_model, num_labels=2) if args.distillBertPath is None else \
                        DistilBertConfig.from_pretrained(args.distillBertPath, num_labels=2)
        tokenizer = DistilBertTokenizer.from_pretrained(bert_model) if args.distillBertPath is None else \
                        DistilBertTokenizer.from_pretrained(args.distillBertPath)

    train_set = NLIDataset("../../../snli_1.0/snli_1.0_train.jsonl", tokenizer=tokenizer)
    val_set = NLIDataset("../../../snli_1.0/snli_1.0_dev.jsonl", tokenizer=tokenizer)
    train_size, val_size = len(train_set), len(val_set)

    accs, Ps, Rs, F1s = [], [], [], []
    # Store labels and logits for individual splits for micro F1
    labels_all, logits_all = [], []
    train_loader = DataLoader(
        train_set,
        batch_size=32,  # NOTE(review): hard-coded; args.batch_size is logged below but unused here — confirm intent
        shuffle=True,
        # BUG FIX: honour the detected `device` instead of hard-coding
        # cuda:0, which crashed on CPU-only machines.
        collate_fn=collate_SNLI_batch_with_device(device)
    )
    validation_evaluator = NLIEvaluator(val_set, device)

    # Start logging into a fresh, empty directory. Replaces the shelled-out
    # `rm -rf`/`mkdir`, which silently failed on paths containing spaces.
    log_dir = args.model_dir
    if os.path.exists(log_dir):
        shutil.rmtree(log_dir)
    os.makedirs(log_dir)
    fitlog.set_log_dir(log_dir)
    fitlog.add_hyper({
            "epochs": n_epochs,
            "learning_rate": lr,
            "warmup": args.warmup_steps,
            "weight_decay": weight_decay,
            "batch_size": batch_size,
            "train_split_percentage": args.train_pct,
            "bert_model": bert_model,
            "seed": seed,
            "pretrained_model": args.pretrained_model,
            "tags": ",".join(args.tags)
        }, name=args.run_name)

    # Re-target the config for 3-way NLI classification (the pretrained
    # config above was loaded with num_labels=2).
    bert_config.num_labels = 3
    bert_config.hidden_act = "relu"
    # Create the model
    if args.full_bert:
        bert = BertForSequenceClassification.from_pretrained(
                        bert_model, config=bert_config).to(device) if args.bertPath is None \
                else BertForSequenceClassification.from_pretrained(
                        args.bertPath, config=bert_config).to(device)
    else:
        bert = DistilBertForSequenceClassification.from_pretrained(
                    bert_model, config=bert_config).to(device) if args.distillBertPath is None \
                else DistilBertForSequenceClassification.from_pretrained(
                        args.distillBertPath, config=bert_config).to(device)

    model = VanillaBert(bert).to(device)

    # Create the optimizer: no weight decay on biases / LayerNorm parameters.
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
         'weight_decay': weight_decay},
        {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=lr)

    # Train
    train(
        model,
        train_loader,
        optimizer,
        None,  # no LR scheduler
        validation_evaluator,
        3,  # NOTE(review): hard-coded epoch count; args.n_epochs is logged as a hyperparameter but unused — confirm intent
        args.log_interval,
        model_dir=args.model_dir,
        domain_name=None
    )

    # Load the best weights
#     model.load_state_dict(torch.load(f'{args.model_dir}/model_{domain}.pth'))

#     evaluator = ClassificationEvaluator(test_dset, device, use_domain=False)
#     (loss, acc, P, R, F1), plots, (labels, logits), votes = evaluator.evaluate(
#         model,
#         plot_callbacks=[plot_label_distribution],
#         return_labels_logits=True,
#         return_votes=True
#     )
#     print(f"{domain} F1: {F1}")
#     print(f"{domain} Accuracy: {acc}")

#     fitlog.add_best_metric({f"Validation_{domain}" :
#                                 {"valid_acc": acc,
#                                  "valid_prec": P,
#                                  "valid_recall": R,
#                                  "valid_f1": F1
#                                  }})
#     Ps.append(P)
#     Rs.append(R)
#     F1s.append(F1)
#     accs.append(acc)
#     labels_all.extend(labels)
#     logits_all.extend(logits)
#     with open(f'{args.model_dir}/pred_lab.txt', 'a+') as f:
#         for p, l in zip(np.argmax(logits, axis=-1), labels):
#             f.write(f'{domain}\t{p}\t{l}\n')

# acc, P, R, F1 = acc_f1(logits_all, labels_all)
# # Add to wandb
# fitlog.add_best_metric({f"Evaluation":
#                             {
#                              "test_micro_acc": acc,
#                              "test_micro_prec": P,
#                              "test_micro_recall": R,
#                              "test_micro_f1": F1
#                              }})

# fitlog.add_best_metric({f"Evaluation":
#                             {
#                              "test_micro_acc": sum(accs) / len(accs),
#                              "test_micro_prec": sum(Ps) / len(Ps),
#                              "test_micro_recall": sum(Rs) / len(Rs),
#                              "test_micro_f1": sum(F1s) / len(F1s)
#                              }})
#wandb.log({f"label-distribution-test-{i}": plots[0]})
#print({f"label-distribution-test-{i}": plots[0]})
