import torch
import transformers
from transformers import AutoTokenizer
import numpy as np
from pathlib import Path
from torch.utils.data import (
    DataLoader,
    RandomSampler,
    DistributedSampler,
    SequentialSampler,
)
from tqdm import tqdm
from torch.distributed import destroy_process_group
import os


from src.options import Options
import src.slurm
import src.util
import src.evaluation
import src.data
from src.model import FiDT5

from loguru import logger


def train(
    model,
    optimizer,
    scheduler,
    step,
    train_dataset,
    eval_dataset,
    opt,
    collator,
    best_dev_em,
    checkpoint_path,
):
    """Run the distributed training loop with gradient accumulation.

    Every ``opt.eval_freq`` steps the model is evaluated on ``eval_dataset``
    and, on rank 0, checkpointed via ``src.util.save`` when the dev EM
    improves; a ``step-N`` snapshot is also written every ``opt.save_freq``
    steps. The process group is destroyed when the step budget is exhausted.

    Args:
        model: FiDT5 model (possibly DDP-wrapped), already moved to its device.
        optimizer: optimizer from ``src.util.set_optim`` / ``src.util.load``.
        scheduler: LR scheduler paired with ``optimizer``.
        step: global step to resume counting from.
        train_dataset: ``src.data.Dataset`` of training examples.
        eval_dataset: ``src.data.Dataset`` of dev examples.
        opt: parsed options namespace (seeds, freqs, batch sizes, ...).
        collator: ``src.data.Collator`` producing
            ``(idx, labels, _, context_ids, context_mask)`` batches.
        best_dev_em: best dev exact-match observed so far.
        checkpoint_path: directory checkpoints are written under.

    NOTE(review): the ``evaluate`` call below reads a module-level
    ``tokenizer`` defined in the ``__main__`` section, so this function only
    works when run from this script.
    """
    # Different torch seed per rank so stochastic ops (e.g. dropout) differ
    # across workers; data sharding is handled by the sampler below.
    torch.manual_seed(opt.global_rank + opt.seed)

    # The sampler seed must be IDENTICAL on every rank so shards are
    # disjoint. Fix: use opt.seed rather than a hard-coded 42.
    train_sampler = DistributedSampler(train_dataset, seed=opt.seed, shuffle=True)
    train_dataloader = DataLoader(
        train_dataset,
        sampler=train_sampler,
        batch_size=opt.per_gpu_batch_size,
        drop_last=True,
        num_workers=10,
        collate_fn=collator,
    )

    curr_loss = 0.0
    epoch = 0

    gpu_id = int(os.environ["LOCAL_RANK"])
    print("gpu_id: ", gpu_id)

    model.train()
    pbar = tqdm(total=opt.total_steps)
    while step < opt.total_steps:
        epoch += 1  # fix: was initialized to 1, so the first epoch showed as 2
        # Fix: without set_epoch every epoch replays the same shuffle order.
        train_sampler.set_epoch(epoch)
        pbar.set_description(f"Epoch {epoch}")
        for i, batch in enumerate(train_dataloader):
            pbar.update(1)

            step += 1
            (idx, labels, _, context_ids, context_mask) = batch

            train_loss = model(
                input_ids=context_ids.to(gpu_id),
                attention_mask=context_mask.to(gpu_id),
                labels=labels.to(gpu_id),
            )[0]

            # Average the loss over accumulation steps so the effective
            # gradient matches a single large-batch update.
            train_loss = train_loss / opt.accumulation_steps
            train_loss.backward()

            # Only step the optimizer once every accumulation_steps batches.
            if step % opt.accumulation_steps == 0:
                torch.nn.utils.clip_grad_norm_(model.parameters(), opt.clip)
                optimizer.step()
                scheduler.step()
                model.zero_grad()

            train_loss = src.util.average_main(train_loss, opt)
            curr_loss += train_loss.item()

            pbar.set_postfix_str(f"train_loss: {train_loss.item():.4f}")

            if step % opt.eval_freq == 0:
                dev_em = evaluate(model, eval_dataset, tokenizer, collator, opt)
                model.train()  # evaluate() leaves the model in eval mode
                if gpu_id == 0:
                    if dev_em > best_dev_em:
                        best_dev_em = dev_em
                        src.util.save(
                            model,
                            optimizer,
                            scheduler,
                            step,
                            best_dev_em,
                            opt,
                            checkpoint_path,
                            "best_dev",
                        )
                    log = f"{step} / {opt.total_steps} |"
                    log += f"train: {curr_loss/opt.eval_freq:.3f} |"
                    log += f"evaluation: {100*dev_em:.2f}EM |"
                    log += f"lr: {scheduler.get_last_lr()[0]:.5f}"
                    logger.info(log)

                    curr_loss = 0.0

            if gpu_id == 0 and step % opt.save_freq == 0:
                src.util.save(
                    model,
                    optimizer,
                    scheduler,
                    step,
                    best_dev_em,
                    opt,
                    checkpoint_path,
                    f"step-{step}",
                )
            # Fix: `>` allowed one extra step past the budget; stop exactly
            # at opt.total_steps.
            if step >= opt.total_steps:
                break
    pbar.close()
    destroy_process_group()


def evaluate(model, dataset, tokenizer, collator, opt):
    """Compute exact-match (EM) on this rank's shard of ``dataset``.

    Generates answers with ``model.generate`` and scores each against the
    gold answers via ``src.evaluation.ems``; per-rank means are combined by
    ``src.util.weighted_average``. Leaves the model in eval mode — the
    caller is responsible for switching back to train mode.

    Args:
        model: FiDT5 model, possibly DDP-wrapped (unwrapped here for
            ``generate``).
        dataset: ``src.data.Dataset`` exposing ``get_example(idx)`` with an
            ``"answers"`` list per example.
        tokenizer: tokenizer used to decode generated ids.
        collator: batch collator producing
            ``(idx, _, _, context_ids, context_mask)``.
        opt: options namespace (batch size, answer_maxlength, distributed info).

    Returns:
        float: globally averaged exact-match score in [0, 1].
    """
    gpu_id = int(os.environ["LOCAL_RANK"])
    sampler = SequentialSampler(dataset)
    dataloader = DataLoader(
        dataset,
        sampler=sampler,
        batch_size=opt.per_gpu_batch_size,
        drop_last=False,
        num_workers=10,
        collate_fn=collator,
    )
    model.eval()
    total = 0
    exactmatch = []
    # DDP wraps the real model in .module; generate() lives on the inner model.
    model = model.module if hasattr(model, "module") else model
    with torch.no_grad():
        for i, batch in enumerate(dataloader):
            (idx, _, _, context_ids, context_mask) = batch

            outputs = model.generate(
                input_ids=context_ids.to(gpu_id),
                attention_mask=context_mask.to(gpu_id),
                max_length=opt.answer_maxlength,
            )

            for k, o in enumerate(outputs):
                ans = tokenizer.decode(o, skip_special_tokens=True)
                gold = dataset.get_example(idx[k])["answers"]
                # ans: str; gold: list of str
                score = src.evaluation.ems(ans, gold)
                total += 1
                exactmatch.append(score)

    # Fix: np.mean([]) returns NaN (with a RuntimeWarning) when this rank's
    # shard is empty; report 0.0 with weight 0 instead.
    local_mean = float(np.mean(exactmatch)) if exactmatch else 0.0
    exactmatch, total = src.util.weighted_average(local_mean, total, opt)
    return exactmatch


if __name__ == "__main__":
    # --- options / distributed setup -----------------------------------
    options = Options()
    options.add_reader_options()
    options.add_optim_options()
    options.parser.add_argument("--total_batch_size", type=int, default=16)
    opt = options.parse()
    opt.local_rank = int(os.environ["LOCAL_RANK"])

    torch.manual_seed(opt.seed)
    src.slurm.init_distributed_mode(opt)
    src.slurm.init_signal_handler()

    # Scale step-based options so the effective batch size stays at
    # opt.total_batch_size regardless of GPU count: fewer examples per
    # optimizer step => proportionally more steps.
    # Fix: clamp to >= 1 — when world_size * per_gpu_batch_size exceeds
    # total_batch_size the floor division yields 0, which would zero out
    # total_steps/eval_freq and crash on `step % 0` in train().
    step_scale_size = max(
        1, opt.total_batch_size // opt.world_size // opt.per_gpu_batch_size
    )

    opt.total_steps *= step_scale_size
    opt.accumulation_steps *= step_scale_size
    opt.save_freq *= step_scale_size
    opt.eval_freq *= step_scale_size

    if opt.local_rank == 0:
        logger.info(f"step_scale_size: {step_scale_size}")
        logger.info(f"opt.total_steps: {opt.total_steps}")
        logger.info(f"opt.accumulation_steps: {opt.accumulation_steps}")
        logger.info(f"opt.save_freq: {opt.save_freq}")
        logger.info(f"opt.eval_freq: {opt.eval_freq}")

    model_name = "t5-" + opt.model_size

    # Checkpoints go under checkpoint_dir/{dataset}-{t5-size}/
    dataset_name = "WebQSP" if "WebQSP" in opt.train_data else "CWQ"
    dataset_name = "Merge" if "merge_" in opt.train_data else dataset_name

    checkpoint_path = Path(opt.checkpoint_dir) / f"{dataset_name}-{model_name}"

    # Fix: this flag was hard-coded to False, so the "resume from latest"
    # branch below was unreachable; detect an existing checkpoint instead.
    checkpoint_exists = (checkpoint_path / "checkpoint" / "latest").exists()
    if opt.is_distributed:
        torch.distributed.barrier()
    checkpoint_path.mkdir(parents=True, exist_ok=True)

    # --- tokenizer / data ----------------------------------------------
    # NOTE(review): pretrained-model path is machine-specific; consider
    # making it an option.
    tokenizer = AutoTokenizer.from_pretrained(
        "/home/xionggm/codes/decode-answer-logical-form/PLMs/" + model_name, verbose=False
    )
    collator = src.data.Collator(
        opt.text_maxlength, tokenizer, answer_maxlength=opt.answer_maxlength
    )

    # Use global rank and world size to shard the train set across GPUs.
    train_examples = src.data.load_data(
        opt.train_data,
        global_rank=opt.global_rank,
        world_size=opt.world_size,
    )
    train_dataset = src.data.Dataset(train_examples, opt.n_context)

    # Use global rank and world size to shard the eval set across GPUs.
    eval_examples = src.data.load_data(
        opt.eval_data,
        global_rank=opt.global_rank,
        world_size=opt.world_size,
    )
    eval_dataset = src.data.Dataset(eval_examples, opt.n_context)

    # --- model: fresh init, resume, or explicit path --------------------
    model_class = FiDT5
    if not checkpoint_exists and opt.model_path == "none":
        # Fresh start: wrap a pretrained T5 into FiD.
        t5 = transformers.T5ForConditionalGeneration.from_pretrained(
            "/home/xionggm/codes/decode-answer-logical-form/PLMs/" + model_name,
            return_dict=False,
        )
        model = FiDT5(t5.config)
        model.load_t5(t5.state_dict())
        model = model.to(opt.local_rank)
        optimizer, scheduler = src.util.set_optim(opt, model)
        step, best_dev_em = 0, 0.0
    elif opt.model_path == "none":
        # Resume from the latest checkpoint in this run's directory,
        # keeping optimizer/scheduler state.
        load_path = checkpoint_path / "checkpoint" / "latest"
        model, optimizer, scheduler, opt_checkpoint, step, best_dev_em = src.util.load(
            model_class, load_path, opt, reset_params=False
        )
        logger.info(f"Model loaded from {load_path}")
    else:
        # Warm-start from an explicit checkpoint but reset optim state.
        model, optimizer, scheduler, opt_checkpoint, step, best_dev_em = src.util.load(
            model_class, opt.model_path, opt, reset_params=True
        )
        logger.info(f"Model loaded from {opt.model_path}")

    model.set_checkpoint(opt.use_checkpoint)

    if opt.is_distributed:
        if opt.local_rank == 0:
            logger.info("Distributed training DDP")
        model = torch.nn.parallel.DistributedDataParallel(
            model,
            device_ids=[opt.local_rank],
            output_device=opt.local_rank,
        )

    train(
        model,
        optimizer,
        scheduler,
        step,
        train_dataset,
        eval_dataset,
        opt,
        collator,
        best_dev_em,
        checkpoint_path,
    )