# from tokenizers import ByteLevelBPETokenizer
import os
from pathlib import Path

import torch
from tokenizers.implementations import ByteLevelBPETokenizer
from tokenizers.processors import BertProcessing
from transformers import RobertaConfig
from transformers import RobertaTokenizer
from transformers import RobertaForMaskedLM
from transformers import LineByLineTextDataset
from transformers import DataCollatorForLanguageModeling
from transformers import Trainer, TrainingArguments

# Output directory shared by the whole script: the trained tokenizer files
# (vocab.json / merges.txt) and the model checkpoints are all saved here.
token_dir = 'content/KantaiBERT'


def build_tokens(
    corpus_dir=r"F:\书本资源\大模型应用解决方案\Transformers-for-NLP-2nd-Edition-main\Chapter04",
    vocab_size=52_000,
    min_frequency=2,
    output_dir=None,
):
    """Train a byte-level BPE tokenizer on every ``*.txt`` file under *corpus_dir*.

    The trained ``vocab.json`` / ``merges.txt`` pair is written to *output_dir*
    (defaults to the module-level ``token_dir``).

    Args:
        corpus_dir: Root directory searched recursively for ``.txt`` corpus files.
        vocab_size: Target BPE vocabulary size (must match the model config).
        min_frequency: Minimum pair frequency for a merge to be kept.
        output_dir: Destination directory; created if it does not exist.

    Returns:
        The trained ``ByteLevelBPETokenizer`` instance.
    """
    if output_dir is None:
        output_dir = token_dir
    paths = [str(p) for p in Path(corpus_dir).glob("**/*.txt")]
    tokenizer = ByteLevelBPETokenizer()
    # Special tokens in this exact set/order are what the RoBERTa tokenizer
    # and the downstream masking collator expect.
    tokenizer.train(
        files=paths,
        vocab_size=vocab_size,
        min_frequency=min_frequency,
        special_tokens=[
            "<s>",
            "<pad>",
            "</s>",
            "<unk>",
            "<mask>",
        ],
    )
    # exist_ok avoids the check-then-create race of exists()+makedirs().
    os.makedirs(output_dir, exist_ok=True)
    tokenizer.save_model(output_dir)
    return tokenizer


vocab_file = os.path.join(token_dir, "vocab.json")
merges_file = os.path.join(token_dir, "merges.txt")
# Bug fix: build_tokens() was defined but never called, so on a fresh run the
# vocab/merges files do not exist and loading the tokenizer crashes. Train
# the tokenizer first when the files are missing; no-op when they exist.
if not (os.path.isfile(vocab_file) and os.path.isfile(merges_file)):
    build_tokens()

tokenizer = ByteLevelBPETokenizer(vocab_file, merges_file)

# Raw encode: no special tokens, no truncation configured yet.
print(tokenizer.encode("The Critique of Pure Reason."))

# Wrap every encoded sequence as <s> ... </s>, mirroring RoBERTa's input format.
tokenizer._tokenizer.post_processor = BertProcessing(
    ("</s>", tokenizer.token_to_id("</s>")),
    ("<s>", tokenizer.token_to_id("<s>")),
)
tokenizer.enable_truncation(max_length=512)

# Same sentence, now with the special tokens attached by the post-processor.
print(tokenizer.encode("The Critique of Pure Reason."))

# Small RoBERTa-style configuration (6 layers / 12 heads) for training from
# scratch. The RobertaConfig import now lives in the top-of-file import block.
config = RobertaConfig(
    vocab_size=52_000,            # must match the tokenizer's trained vocab size
    max_position_embeddings=514,  # 512 usable positions + RoBERTa's 2-position offset
    num_attention_heads=12,
    num_hidden_layers=6,
    type_vocab_size=1,            # RoBERTa does not use segment (token-type) embeddings
)

print(config)

# Reload the trained vocab/merges through the transformers wrapper so the
# dataset and collator below get a tokenizer with the standard HF interface.
tokenizer = RobertaTokenizer.from_pretrained(token_dir, max_length=512)
# Fresh (randomly initialized) masked-LM model built from the config above.
model = RobertaForMaskedLM(config=config)
print(model)
print(model.num_parameters())

# One training example per non-empty line of the corpus file, truncated to
# block_size tokens.  NOTE(review): LineByLineTextDataset is deprecated in
# recent transformers releases — confirm the installed version still ships it.
dataset = LineByLineTextDataset(
    tokenizer=tokenizer,
    file_path=r"F:\书本资源\大模型应用解决方案\Transformers-for-NLP-2nd-Edition-main\Chapter04\kant.txt",
    block_size=128,
)
# Dynamic masked-language-modeling collator: masks 15% of tokens per batch.
data_collator = DataCollatorForLanguageModeling(
    tokenizer=tokenizer,
    mlm=True,
    mlm_probability=0.15,
)
# Training configuration: checkpoints are written next to the tokenizer files,
# keeping at most two and saving every 10k optimizer steps.
train_args = TrainingArguments(
    output_dir=token_dir,
    overwrite_output_dir=True,
    per_device_train_batch_size=128,
    num_train_epochs=2,
    save_total_limit=2,
    save_steps=10_000,
)

# Wire the model, masking collator and line-by-line dataset together,
# run the MLM training loop, and persist the final model.
mlm_trainer = Trainer(
    model=model,
    args=train_args,
    train_dataset=dataset,
    data_collator=data_collator,
)
mlm_trainer.train()
mlm_trainer.save_model(token_dir)