import math
from argparse import ArgumentParser
from itertools import chain

from datasets import load_dataset, load_from_disk
from transformers import (
    AutoModelForMaskedLM,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    Trainer,
    TrainingArguments,
)

# Command-line interface for the fine-tuning run. Each flag feeds directly
# into the model/dataset loading and TrainingArguments below.
parser = ArgumentParser('MLM Finetune')
parser.add_argument('--pretrained', type=str, default='bert-base-chinese')  # HF checkpoint for model + tokenizer
parser.add_argument('--dataset', type=str, default='')  # path for datasets.load_from_disk; required (checked below)
parser.add_argument('--output_dir', type=str, default='finetuned')  # where Trainer checkpoints and the final model go
parser.add_argument('--lr', type=float, default=2e-5)  # learning_rate
parser.add_argument('--wd', type=float, default=0.01)  # weight_decay
parser.add_argument('--bs', type=int, default=64)  # per-device train/eval batch size
parser.add_argument('--log_freq', type=int, default=20)  # logging_steps
parser.add_argument('--epochs', type=int, default=10)  # num_train_epochs
parser.add_argument('--mlm_prob', type=float, default=0.15)  # masking probability for the MLM collator
parser.add_argument('--chunk_size', type=int, default=128)  # fixed sequence length used by group_texts

args = parser.parse_args()

# Load the pretrained masked-LM model and its matching tokenizer.
model_checkpoint = args.pretrained
model = AutoModelForMaskedLM.from_pretrained(model_checkpoint)
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)

# A dataset path is required. Use an explicit CLI error instead of `assert`:
# assertions are stripped when Python runs with -O, which would let an empty
# path fall through to a confusing load_from_disk failure.
if not args.dataset:
    parser.error('--dataset is required (path for datasets.load_from_disk)')
dataset = load_from_disk(args.dataset)


def tokenize_function(examples):
    """Tokenize a batch of raw text examples.

    When a fast (Rust-backed) tokenizer is in use, also attach per-row
    word-id lists so whole-word grouping remains possible downstream.
    """
    encoded = tokenizer(examples["text"])
    if tokenizer.is_fast:
        n_rows = len(encoded["input_ids"])
        encoded["word_ids"] = [encoded.word_ids(row) for row in range(n_rows)]
    return encoded


# Tokenize the whole dataset in batches, dropping the original raw columns
# so only the tokenizer outputs remain.
old_columns = list(dataset.features)
tokenized_datasets = dataset.map(
    tokenize_function,
    batched=True,
    remove_columns=old_columns,
)


def group_texts(examples, chunk_size=None):
    """Concatenate a batch of tokenized examples and re-split into fixed chunks.

    Args:
        examples: dict of column name -> list of per-example token lists.
        chunk_size: length of each output chunk; defaults to args.chunk_size
            so existing `dataset.map(group_texts, batched=True)` calls are
            unchanged.

    Returns:
        dict with the same columns re-chunked to `chunk_size`, plus a
        "labels" column copied from "input_ids" (the MLM collator masks
        inputs and uses labels as targets). The trailing partial chunk,
        if any, is dropped.
    """
    if chunk_size is None:
        chunk_size = args.chunk_size
    # Concatenate all texts. chain.from_iterable is O(n); the previous
    # sum(lists, []) idiom re-allocates on every step and is quadratic.
    concatenated_examples = {
        k: list(chain.from_iterable(v)) for k, v in examples.items()
    }
    # Compute the total length from any one column (all are parallel).
    total_length = len(next(iter(concatenated_examples.values())))
    # Drop the last chunk if it's smaller than chunk_size.
    total_length = (total_length // chunk_size) * chunk_size
    # Split every column into chunks of chunk_size.
    result = {
        k: [t[i: i + chunk_size] for i in range(0, total_length, chunk_size)]
        for k, t in concatenated_examples.items()
    }
    # Labels start as a copy of the inputs; the data collator masks later.
    result["labels"] = result["input_ids"].copy()
    return result


# Re-chunk the tokenized corpus into fixed-length LM training examples.
lm_datasets = tokenized_datasets.map(group_texts, batched=True)

# Collator applies random masking at batch time (dynamic masking).
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=args.mlm_prob)
training_args = TrainingArguments(
    output_dir=args.output_dir,
    overwrite_output_dir=True,
    evaluation_strategy="epoch",
    num_train_epochs=args.epochs,
    learning_rate=args.lr,
    weight_decay=args.wd,
    per_device_train_batch_size=args.bs,
    per_device_eval_batch_size=args.bs,
    push_to_hub=False,
    fp16=True,
    logging_steps=args.log_freq,
)
# NOTE(review): train and eval use the same dataset, so the eval perplexity
# measures fit to the training corpus, not generalization — confirm intended.
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=lm_datasets,
    eval_dataset=lm_datasets,
    data_collator=data_collator,
    tokenizer=tokenizer,
)

# Perplexity = exp(cross-entropy loss); report before and after training.
eval_results = trainer.evaluate()
print(f">>> Before Training Perplexity: {math.exp(eval_results['eval_loss']):.2f}")

trainer.train()
eval_results = trainer.evaluate()
print(f">>> After Training Perplexity: {math.exp(eval_results['eval_loss']):.2f}")

trainer.save_model(args.output_dir)