import argparse
import logging
import math
import os

import datasets
import torch
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from datasets import load_dataset, load_from_disk
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import (
    DataCollatorForLanguageModeling,
    AutoConfig,
    CONFIG_MAPPING,
    MODEL_MAPPING,
    AutoTokenizer,
    get_scheduler
)
from transformers.utils.versions import require_version

from model import BartForTextInfilling
from utils import preprocess, text_infilling

# Accelerate-aware logger: deduplicates output across processes; DEBUG level
# so per-process debug messages are emitted.
logger = get_logger(__name__, log_level="DEBUG")
require_version("datasets>=1.8.0", "To fix: pip install -r requirements.txt")
# Config classes registered in transformers' model mapping; their `model_type`
# strings become the valid choices for the --model_type CLI option.
MODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


def parse_args():
    """Build and parse the command-line arguments for this script.

    Options:
        --data_path: path to the dataset on disk (downloaded there if missing).
        --model_name_or_path: model identifier on the Hugging Face hub, or a local path.
        --config_name: pretrained config name/path if different from the model name.
        --tokenizer_name: pretrained tokenizer name/path.
        --mask_probability: per-token probability of being masked.
        --model_type: architecture to use when training from scratch.
        --output_dir: where to store the final model and checkpoints.
        --max_seq_length: maximum tokenized sequence length (longer input is truncated).
        --checkpointing_steps: save training state every N steps.
        --resume_from_checkpoint: checkpoint folder to resume training from.
        --log_dir: TensorBoard log directory.

    Returns:
        argparse.Namespace holding the parsed values.
    """
    parser = argparse.ArgumentParser(description="Finetune a bart model on a Text Infilling task")
    parser.add_argument("--data_path", type=str, default="bookcorpus",
                        help="Path to the dataset")
    parser.add_argument("--model_name_or_path", type=str, required=False,
                        help="Path to pretrained model or model identifier from huggingface.co/models.")
    parser.add_argument("--config_name", type=str, default=None,
                        help="Pretrained config name or path if not the same as model_name")
    parser.add_argument("--tokenizer_name", type=str, default="facebook/bart-base",
                        help="Pretrained tokenizer name or path if not the same as model_name")
    parser.add_argument("--mask_probability", type=float, default=0.15,
                        help="Ratio of tokens to mask for masked language modeling loss")
    parser.add_argument("--model_type", type=str, default=None, choices=MODEL_TYPES,
                        help="Model type to use if training from scratch.")
    parser.add_argument("--output_dir", type=str, default=None,
                        help="Where to store the final model.")
    parser.add_argument("--max_seq_length", type=int, default=256,
                        help="The maximum total input sequence length after tokenization. Sequences longer than this will be truncated.")
    parser.add_argument("--checkpointing_steps", type=int, default=500,
                        help="The various states should be saved at the end of every n steps.")
    parser.add_argument("--resume_from_checkpoint", type=str, default=None,
                        help="If the training should continue from a checkpoint folder.")
    parser.add_argument("--log_dir", type=str, default="./tensorboard",
                        help="Tensorboard log directory.")
    return parser.parse_args()


def main():
    """Fine-tune a BART model on a text-infilling objective with 🤗 Accelerate.

    Materializes the bookcorpus dataset on disk, tokenizes it, applies
    text-infilling corruption to every batch on the fly, trains for three
    epochs with a linear LR schedule, evaluates perplexity after each epoch,
    and writes `step_{N}` / `epoch_{N}` checkpoints via `accelerator.save_state`.
    """
    args = parse_args()

    # Hyper-parameters that are currently hard-coded in this script.
    num_epochs = 3
    batch_size = 8

    # Initialize the accelerator; metrics go to TensorBoard under args.log_dir.
    accelerator = Accelerator(log_with=["tensorboard"], project_dir=args.log_dir)

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state, main_process_only=False)

    # Full library verbosity only on the local main process; errors elsewhere.
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    if accelerator.is_main_process:
        if args.output_dir:
            os.makedirs(args.output_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    # Download/save the dataset once (main process only), then all processes
    # load the on-disk copy.
    if accelerator.is_main_process:
        if not os.path.exists(args.data_path):
            load_dataset(path="bookcorpus", split="train").save_to_disk(args.data_path)
    accelerator.wait_for_everyone()

    dataset = load_from_disk(args.data_path)

    # NOTE(review): splits are hard-coded — first 100k rows for training, the
    # following 1k rows for evaluation.
    train_dataset = dataset.select(range(100000))
    eval_dataset = dataset.select(range(100000, 101000))

    # Load the model config (explicit --config_name > model path > scratch).
    if args.config_name:
        config = AutoConfig.from_pretrained(args.config_name)
    elif args.model_name_or_path:
        config = AutoConfig.from_pretrained(args.model_name_or_path)
    else:
        # CONFIG_MAPPING[None] raises KeyError, so --model_type is required here.
        config = CONFIG_MAPPING[args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    # Load the tokenizer; training a tokenizer from scratch is not supported.
    if args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)
    elif args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    # Load the model (pretrained weights if a name/path was given, else scratch).
    if args.model_name_or_path:
        model = BartForTextInfilling.from_pretrained(
            args.model_name_or_path,
            config=config
        )
    else:
        logger.info("Training new model from scratch")
        model = BartForTextInfilling(config=config)

    # Tokenize on the main process first so other processes reuse the cache.
    with accelerator.main_process_first():
        train_dataset = train_dataset.map(lambda data: preprocess(data, tokenizer, args), batched=True,
                                          num_proc=4).remove_columns("text")
        eval_dataset = eval_dataset.map(lambda data: preprocess(data, tokenizer, args), batched=True,
                                        num_proc=4).remove_columns("text")

    # mlm=False: the collator only pads and builds labels; the text-infilling
    # corruption itself is applied per batch inside the training loop below.
    data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)

    train_dataloader = DataLoader(
        train_dataset, shuffle=True, collate_fn=data_collator, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        eval_dataset, collate_fn=data_collator, batch_size=batch_size
    )

    # Optimizer: no weight decay for biases and LayerNorm weights.
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": 0.01,
        },
        {
            "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
            "weight_decay": 0.0,
        },
    ]
    optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=2e-5)

    # NOTE(review): computed before accelerator.prepare(), so this is the
    # single-process dataloader length; the progress bar below compensates by
    # dividing by num_processes.
    num_update_steps_per_epoch = len(train_dataloader)
    max_train_steps = num_epochs * num_update_steps_per_epoch

    # FIX: the scheduler previously received num_training_steps=3, which
    # decayed the learning rate to zero after only three optimizer steps.
    lr_scheduler = get_scheduler(
        name="linear",
        optimizer=optimizer,
        num_warmup_steps=0,
        num_training_steps=max_train_steps
    )

    # Prepare everything with our `accelerator`.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    accelerator.init_trackers("new_model")

    logger.info("***** Running training *****")
    logger.info(f"  Num examples = {len(train_dataset)}")
    # Only show the progress bar once on each machine.
    progress_bar = tqdm(range(max_train_steps // accelerator.num_processes),
                        disable=not accelerator.is_local_main_process)
    completed_steps = 0
    starting_epoch = 0
    resume_step = None  # in-epoch step offset when resuming from a step_* checkpoint

    # Potentially load in the weights and states from a previous save.
    # FIX: the original condition used `or`, which is always true inside the
    # truthy guard, so the "most recent checkpoint" fallback was dead code (and
    # never called load_state). Pass --resume_from_checkpoint "" to auto-pick
    # the newest folder in the working directory.
    if args.resume_from_checkpoint is not None:
        if args.resume_from_checkpoint != "":
            checkpoint_path = args.resume_from_checkpoint
        else:
            # Get the most recent checkpoint: sort folders by creation time,
            # the most recent one is last.
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            checkpoint_path = dirs[-1]
        accelerator.print(f"Resumed from checkpoint: {checkpoint_path}")
        accelerator.load_state(checkpoint_path)
        # Checkpoint folders are named `epoch_{i}` or `step_{i}`.
        path = os.path.basename(checkpoint_path)
        logger.debug(path)

        if "epoch" in path:
            # Resumed at an epoch boundary: restart from the next epoch.
            starting_epoch = int(path.replace("epoch_", "")) + 1
        else:
            # Resumed mid-epoch: convert the global step into an epoch index
            # plus an in-epoch offset so already-seen batches can be skipped.
            resume_step = int(path.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)

    # Fast-forward the progress bar / step counter for fully-skipped epochs.
    progress_bar.update(starting_epoch * num_update_steps_per_epoch)
    completed_steps = starting_epoch * num_update_steps_per_epoch

    for epoch in range(starting_epoch, num_epochs):
        model.train()
        total_loss = 0  # per-process sum of micro-batch losses (debug/inspection)
        for step, batch in enumerate(train_dataloader):
            # We need to skip steps until we reach the resumed step.
            if args.resume_from_checkpoint and epoch == starting_epoch:
                if resume_step is not None and step < resume_step:
                    progress_bar.update(1)
                    completed_steps += 1
                    continue

            with accelerator.accumulate(model):
                # Apply text-infilling corruption to the already-tokenized
                # inputs; text_infilling mutates the Python lists in place.
                input_ids = batch["input_ids"].tolist()
                attention_mask = batch["attention_mask"].tolist()
                text_infilling(input_ids, attention_mask, args, tokenizer)
                batch["input_ids"] = torch.LongTensor(input_ids).to(accelerator.device)
                batch["attention_mask"] = torch.LongTensor(attention_mask).to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                total_loss += loss.detach().float()
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            # Checks if the accelerator has performed an optimization step behind the scenes.
            if accelerator.sync_gradients:
                progress_bar.update(1)
                completed_steps += 1

                # FIX: checkpoint/log only on real optimizer steps. Previously
                # this check sat outside the sync_gradients guard, so with
                # gradient accumulation it could re-fire (and re-save) on every
                # micro-batch while completed_steps was stuck at a multiple of
                # checkpointing_steps — including 0 at the start of training.
                if completed_steps % args.checkpointing_steps == 0:
                    accelerator.log({"loss": loss.detach().item()}, step=completed_steps)
                    output_dir = f"step_{completed_steps}"
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)

        # Evaluation: same text-infilling corruption, no gradients.
        model.eval()
        losses = []
        for step, batch in enumerate(eval_dataloader):
            with torch.no_grad():
                input_ids = batch["input_ids"].tolist()
                attention_mask = batch["attention_mask"].tolist()
                text_infilling(input_ids, attention_mask, args, tokenizer)
                batch["input_ids"] = torch.LongTensor(input_ids).to(accelerator.device)
                batch["attention_mask"] = torch.LongTensor(attention_mask).to(accelerator.device)
                outputs = model(**batch)

            loss = outputs.loss
            # repeat(batch_size) gives one loss value per sample so that
            # gather_for_metrics can drop samples duplicated by distributed
            # padding of the last batch.
            losses.append(accelerator.gather_for_metrics(loss.repeat(batch_size)))

        losses = torch.cat(losses)
        try:
            eval_loss = torch.mean(losses)
            perplexity = math.exp(eval_loss)
        except OverflowError:
            # exp() overflows for very large mean losses.
            perplexity = float("inf")

        logger.info(f"epoch {epoch}: perplexity: {perplexity}")

        # Save an end-of-epoch checkpoint.
        output_dir = f"epoch_{epoch}"
        if args.output_dir is not None:
            output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)


# Script entry point: run training only when executed directly, not on import.
if __name__ == "__main__":
    main()
