from doasr.models.doasrformer.llm import DoAsrForCausalLM
from doasr.models.tokenizer import DoAsrTokenizer
from doasr.train_utils.pretraining_trainer import PretrainingTrainer
import torch
from transformers import TrainingArguments
from datasets import load_dataset
from datetime import datetime
import os
import sys

# Resolve all paths relative to this script's directory so the job behaves
# the same regardless of the working directory it is launched from.
cur_path = os.path.abspath(os.path.dirname(__file__))
# NOTE(review): this append happens *after* the `doasr` imports at the top of
# the file, so those imports cannot depend on it — presumably `doasr` is
# installed or already on PYTHONPATH; confirm whether this line is still needed.
sys.path.append(os.path.join(cur_path, "..", "src"))


# Directory where pretraining outputs (checkpoints / final models) are stored.
output_dir = os.path.join(cur_path, "..", "models_pretraining")

model_name = "DoASR-0.05B"
# model_name_or_path = os.path.join(cur_path, "..", "models", model_name)
# Resume from a previously saved, timestamped pretraining snapshot.
model_name_or_path = os.path.join(
    cur_path, "..", "models_pretraining", model_name + "-pretraining_20240907230710")
model = DoAsrForCausalLM.from_pretrained(model_name_or_path)
model_config = model.config  # read later by data_collator for hidden_size
tokenizer = DoAsrTokenizer.from_pretrained(model_name_or_path)

# Plain-text corpus; `load_dataset("text", ...)` turns each line into one
# example under the "text" column.
data_path = os.path.join(cur_path, "..", "dataset", "wiki_demo_for_doasr.txt")
dataset = load_dataset("text", data_files=data_path)


def tokenize_function(examples):
    """Wrap each raw text line in <|endoftext|> markers and tokenize it.

    Every example is padded/truncated to a fixed length of 512 tokens so
    batches have a uniform shape.
    """
    wrapped = [f"<|endoftext|> {text} <|endoftext|>" for text in examples["text"]]
    return tokenizer(
        wrapped,
        padding="max_length",
        truncation=True,
        max_length=512,
    )


def data_collator(features):
    """Collate tokenized features into a padded tensor batch for pretraining.

    Returns a dict with ``input_ids``, ``attention_mask``, ``labels`` (a copy
    of ``input_ids`` for the causal-LM objective) and a zero-filled
    ``audio_embeds`` placeholder of shape (batch, seq_len, hidden_size).
    """
    # Transpose list-of-dicts into dict-of-lists, as expected by tokenizer.pad.
    batch = {k: [f[k] for f in features] for k in features[0].keys()}
    batch = tokenizer.pad(
        batch, return_attention_mask=True, return_tensors="pt")
    # NOTE(review): padding positions are not masked with -100 here, so loss
    # would also be computed on pad tokens unless PretrainingTrainer handles
    # that itself — confirm.
    batch['labels'] = batch['input_ids'].clone()
    # Zero placeholder for audio embeddings so the multimodal forward pass is
    # satisfied during text-only pretraining.
    # Bug fix: the original indexed batch['input_is'] (typo), which raises
    # KeyError at runtime; it must be batch['input_ids'].
    batch['audio_embeds'] = torch.zeros(
        batch['input_ids'].shape[0],
        batch['input_ids'].shape[1],
        model_config.hidden_size,
        dtype=torch.float32,
        device=batch['input_ids'].device,
    )

    return batch


# Hyperparameters / checkpointing policy for the HF Trainer.
training_args = TrainingArguments(
    # Bug fix: `output_dir` is already an absolute path, so the original
    # f"./{output_dir}/..." produced ".//abs/path", which POSIX resolves as a
    # *relative* path under the current working directory, writing checkpoints
    # to the wrong place. Join the components properly instead.
    output_dir=os.path.join(output_dir, f"{model_name}-pretraining"),
    overwrite_output_dir=True,
    num_train_epochs=10,
    per_device_train_batch_size=3,
    save_steps=10_000,       # checkpoint every 10k optimizer steps
    save_total_limit=2,      # keep only the 2 most recent checkpoints
    logging_steps=10,
)

trainer = PretrainingTrainer(
    model=model,
    args=training_args,
    # Tokenize the raw "text" column in batches via tokenize_function above.
    train_dataset=dataset["train"].map(tokenize_function, batched=True),
    data_collator=data_collator,
)

trainer.train()

# Save the final model and tokenizer into a timestamped directory so repeated
# runs never overwrite each other (same naming scheme used when resuming above).
now_dt = datetime.now().strftime("%Y%m%d%H%M%S")
model.save_pretrained(f"{output_dir}/{model_name}-pretraining_{now_dt}")
tokenizer.save_pretrained(f"{output_dir}/{model_name}-pretraining_{now_dt}")

print("done")
