import os
import argparse
from datasets import load_dataset
from transformers import AutoTokenizer, DataCollatorForLanguageModeling, AutoModelForCausalLM, \
                         TrainingArguments, Trainer


# -----------------
#  Definition
# -----------------

def preprocess_function(examples):
    """Tokenize a batch of raw text lines, appending the literal ' <eos>' marker.

    Uses the module-level ``tokenizer``; sequences are truncated to the
    model's maximum length.

    NOTE(review): ' <eos>' is not GPT-2's native end token (``<|endoftext|>``),
    so it is tokenized as ordinary text — confirm this is intended.
    """
    texts = [f"{line} <eos>" for line in examples["text"]]
    return tokenizer(texts, truncation=True)


def group_texts(examples, block_size=128):
    """Concatenate all tokenized sequences and re-chunk them into fixed-size blocks.

    Args:
        examples: batch dict mapping column name -> list of token-id lists
            (e.g. "input_ids", "attention_mask").
        block_size: length of each output block (default 128, the original
            hard-coded value).

    Returns:
        dict with the same columns, each a list of ``block_size``-long chunks,
        plus a "labels" column that copies "input_ids" (the Trainer shifts
        labels internally for causal LM).

    Any partial tail shorter than ``block_size`` is dropped by the floor
    division. (Bug fix: the original ranged over
    ``total_length // block_size - 1``, which also dropped the last *full*
    block of data.)
    """
    concatenated = {k: sum(examples[k], []) for k in examples}
    total_length = len(concatenated[next(iter(examples))])
    num_blocks = total_length // block_size  # floor discards only the partial tail
    result = {
        k: [t[i * block_size: (i + 1) * block_size] for i in range(num_blocks)]
        for k, t in concatenated.items()
    }
    result["labels"] = result["input_ids"].copy()
    return result


# -----------------
#  Configuration
# -----------------

# Command-line configuration: paths to the three text splits and the output dir.
parser = argparse.ArgumentParser(description='PyTorch Wikitext-2 gpt-2 Language Model')
parser.add_argument('--train_data', type=str, default='data/gigaspeech/train.txt', help='location of train data')
parser.add_argument('--test_data', type=str, default='data/gigaspeech/test.txt', help='location of test data')
parser.add_argument('--valid_data', type=str, default='data/gigaspeech/valid.txt', help='location of valid data')
# Bug fix: help text was copy-pasted from --valid_data; this flag is the output directory.
parser.add_argument('--output', type=str, default='gpt2', help='output directory for checkpoints and logs')

args = parser.parse_args()

# Load the raw text splits (one example per line of each file).
raw_datasets = load_dataset('text', data_files={'train': args.train_data,
                                                'validation': args.valid_data,
                                                'test': args.test_data})

# GPT-2 ships without a pad token; reuse EOS so the collator can pad batches.
tokenizer = AutoTokenizer.from_pretrained("gpt2")
tokenizer.pad_token = tokenizer.eos_token

# Tokenize every split, then regroup the token streams into fixed-size blocks.
tokenized = raw_datasets.map(preprocess_function, batched=True, num_proc=4,
                             remove_columns=raw_datasets["train"].column_names)
lm_datasets = tokenized.map(group_texts, batched=True, num_proc=4)

# mlm=False selects the causal (next-token) language-modeling objective.
collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)

# Pretrained GPT-2 to fine-tune.
gpt2_model = AutoModelForCausalLM.from_pretrained("gpt2")

# Fine-tune. NOTE(review): eval_dataset is the *test* split even though a
# validation split was loaded — confirm this is intended.
os.makedirs(args.output, exist_ok=True)
train_args = TrainingArguments(output_dir=args.output,
                               evaluation_strategy="epoch",
                               save_strategy="epoch",
                               logging_strategy="epoch",
                               learning_rate=2e-5,
                               weight_decay=0.01,
                               num_train_epochs=10.0)

trainer = Trainer(model=gpt2_model,
                  args=train_args,
                  train_dataset=lm_datasets["train"],
                  eval_dataset=lm_datasets["test"],
                  data_collator=collator)
trainer.train()
