import os
import time
import warnings

import argparse
import nltk
import torch
import tqdm
from accelerate import Accelerator
from torch.utils.data import DataLoader, RandomSampler
from transformers import logging

from benchmark import SummaCBenchmark
from model_summac import SummaCConv, model_map, split_sentences
from utils_optim import build_optimizer

# Silence transformers' verbose logging and all Python warnings for cleaner training output.
logging.set_verbosity_error()
warnings.filterwarnings("ignore")
# Single Accelerator instance shared by the whole script (handles device placement
# and, when launched via `accelerate`, distributed setup).
accelerator = Accelerator()


def train(
        model: str = "mnli", granularity: str = "sentence", nli_labels: str = "e", num_epochs: int = 5, optimizer: str = "adam", train_batch_size: int = 32,
        learning_rate: float = 0.1, bins: str = "even50", silent: bool = False, norm_histo: bool = False
) -> float:
    """Train a SummaCConv model on the FactCC train cut, validating on the FactCC val cut.

    Periodically evaluates on the validation benchmark and keeps only the single
    best checkpoint on disk (previous best is deleted when a better score appears).

    Args:
        model: Base NLI model name (a key of model_map), or "multi"/"multi2" for
            predefined ensembles of backbones.
        granularity: Text-splitting granularity passed to SummaCConv (e.g. "sentence").
        nli_labels: Which NLI label probabilities feed the histogram ("e", "c", "n",
            or combinations such as "ecn").
        num_epochs: Number of passes over the training data.
        optimizer: Optimizer name understood by build_optimizer ("adam" or "sgd").
        train_batch_size: Training batch size.
        learning_rate: Optimizer learning rate.
        bins: Histogram binning scheme (e.g. "even50" or "percentile").
        silent: When True, suppress the progress bar and status printouts.
        norm_histo: Forwarded to SummaCConv; presumably normalizes the histogram
            features — confirm against SummaCConv's implementation.

    Returns:
        The best validation benchmark score observed during training.
    """
    # Experiment name is used as the checkpoint filename prefix.
    experiment = "%s_%s_%s_%s" % (model, granularity, bins, nli_labels)

    if not silent:
        print("Experiment name: %s" % experiment)

    device = accelerator.device

    # "multi"/"multi2" expand to fixed ensembles of NLI backbones.
    if model == "multi":
        models = ["mnli", "anli", "vitc"]
    elif model == "multi2":
        models = ["mnli", "vitc", "vitc-only", "vitc-base"]
    else:
        models = [model]

    # Resume from the lexicographically-largest existing checkpoint matching this
    # experiment prefix (filenames embed the score, so "max" favors higher scores
    # only when the prefix and formatting align — a heuristic, not a guarantee).
    # NOTE(review): if no file matches, best_file stays "" and os.path.join below
    # yields the bare model_dir (a directory) — verify SummaCConv's start_file
    # argument treats that as "no checkpoint to load".
    model_dir = "../models/summac/"
    files = os.listdir(model_dir)
    best_file = ""
    for file in files:
        if file.startswith(experiment):
            best_file = max(best_file, file)
    best_file = os.path.join(model_dir, best_file)

    # `model` is rebound here from the name string to the actual SummaCConv instance.
    model = SummaCConv(
        models=models, granularity=granularity, nli_labels=nli_labels,
        device=device, bins=bins, norm_histo=norm_histo, start_file=best_file
    )

    # `optimizer` is likewise rebound from the name string to the optimizer object.
    optimizer = build_optimizer(model, learning_rate=learning_rate, optimizer_name=optimizer)
    if not silent:
        print("Model Loaded")

    def collate_fn(inps):
        # Keep only examples whose document AND claim still contain at least one
        # sentence after sentence splitting; batch labels go straight to `device`.
        documents, claims, labels = [], [], []
        for inp in inps:
            if len(split_sentences(inp["claim"])) > 0 and len(split_sentences(inp["document"])) > 0:
                documents.append(inp["document"])
                claims.append(inp["claim"])
                labels.append(inp["label"])
        labels = torch.LongTensor(labels).to(device)
        return documents, claims, labels

    train_benchmark = SummaCBenchmark(cut="train", dataset_names=["factcc"])
    val_benchmark = SummaCBenchmark(cut="val", dataset_names=["factcc"])

    # Only one dataset was requested ("factcc"), so index 0 is the FactCC train set.
    d_train = train_benchmark.datasets[0]['dataset']
    dl_train = DataLoader(
        dataset=d_train, batch_size=train_batch_size, sampler=RandomSampler(d_train), collate_fn=collate_fn
    )

    if not silent:
        print("Length of dataset. [Training: %d]" % (len(d_train)))

    criterion = torch.nn.CrossEntropyLoss()
    eval_every = 200  # evaluate + checkpoint every 200 training steps
    best_val_score = 0.0
    best_file = ""  # reset: from here on, tracks the best checkpoint written by THIS run

    model, optimizer, dl_train = accelerator.prepare(model, optimizer, dl_train)
    for epoch in range(num_epochs):
        ite = enumerate(dl_train)
        if not silent:
            ite = tqdm.tqdm(ite, total=len(dl_train))
        for ib, batch in ite:
            documents, claims, batch_labels = batch
            logits, _, _ = model(originals=documents, generateds=claims)
            loss = criterion(logits, batch_labels)

            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()

            # Every eval_every steps: record the benchmark score and save model parameters.
            if ib % eval_every == eval_every - 1:
                eval_time = time.time()
                # accelerator.prepare wrapped the model, so the underlying SummaCConv
                # is reached through .module (without Accelerate it would be `model` directly).
                benchmark = val_benchmark.evaluate(model.module)

                val_score = benchmark["overall_score"]
                eval_time = time.time() - eval_time
                if eval_time > 10.0:
                    # A slow evaluation presumably means new NLI "images" were computed;
                    # persist the imager cache so they are reused next time — TODO confirm.
                    model.module.save_imager_cache()

                if not silent:
                    # `ite` is the tqdm wrapper only in the non-silent branch, so
                    # set_description is safe here.
                    ite.set_description("[Benchmark Score: %.3f]" % (val_score))
                # Keep a single best checkpoint: delete the previous one on improvement.
                if val_score > best_val_score:
                    best_val_score = val_score
                    if len(best_file) > 0 and os.path.isfile(best_file):
                        os.remove(best_file)
                    best_file = "../models/summac/%s_bacc%.3f.bin" % (experiment, best_val_score)
                    torch.save(model.module.state_dict(), best_file)
                    if not silent:
                        for t in benchmark["benchmark"]:
                            print("[%s] Score: %.3f (thresh: %.3f)" % (t["name"].ljust(10), t["score"], t["threshold"]))
    return best_val_score


if __name__ == "__main__":
    # CLI entry point: parse hyperparameters and forward them to train().
    parser = argparse.ArgumentParser()

    model_choices = list(model_map.keys()) + ["multi", "multi2"]

    parser.add_argument("--model", type=str, choices=model_choices, default="mnli")
    parser.add_argument("--granularity", type=str, choices=["sentence", "paragraph", "mixed", "2sents"],
                        default="sentence")
    # argparse applies %-formatting to help strings (e.g. for %(default)s), so the
    # percent sign must be escaped as "%%" — a bare "%d" crashes `--help` rendering.
    parser.add_argument("--bins", type=str, default="percentile",
                        help="How should the bins of the histograms be decided (even%%d or percentile)")
    parser.add_argument("--nli_labels", type=str, default="e", choices=["e", "c", "n", "ec", "en", "cn", "ecn"],
                        help="Which of the three labels should be used in the creation of the histogram")

    parser.add_argument("--num_epochs", type=int, default=5, help="Number of passes over the data.")
    parser.add_argument("--optimizer", type=str, choices=["adam", "sgd"], default="adam")
    parser.add_argument("--train_batch_size", type=int, default=32, help="Training batch size.")
    # Fixed help text: it was a copy-paste of --num_epochs' description.
    parser.add_argument("--learning_rate", type=float, default=1e-2, help="Learning rate for the optimizer.")
    parser.add_argument("--norm_histo", action="store_true",
                        help="Normalize the histogram to be between 0 and 1, and include the explicit count")
    # Expose train()'s existing `silent` parameter on the CLI (defaults to False, as before).
    parser.add_argument("--silent", action="store_true",
                        help="Suppress progress bars and status printouts during training.")

    args = parser.parse_args()
    train(**vars(args))
