from transformers import AutoModelForCausalLM, AutoTokenizer, Trainer
import torch
import wandb
from peft import get_peft_model
from args_train import parse_train, lora_set
from data_utils import get_train_eval_data

def collator_fn(data):
    """Collate a list of feature dicts into batched tensors.

    Each item in *data* must provide "input_ids", "attention_mask" and
    "labels" as equal-length sequences; the result holds one stacked
    tensor per key, suitable for the HF ``Trainer`` data_collator hook.
    """
    def _batched(key):
        # Build a (batch, seq_len) tensor from the per-example sequences.
        return torch.tensor([example[key] for example in data])

    return {key: _batched(key) for key in ("input_ids", "attention_mask", "labels")}

def formator_fn(data, tokenizer, max_length,  **kwargs):
    """Build fill-in-the-middle training examples and tokenize them.

    Args:
        data: batch dict with parallel lists under "prefix", "suffix", "mid".
        tokenizer: tokenizer exposing ``prefix_token``/``suffix_token``/
            ``middle_token``/``eos_token`` and ``batch_encode_plus``.
        max_length: fixed length every sequence is padded/truncated to.
        **kwargs: ignored; accepted for dataset-mapping compatibility.

    Returns:
        Dict of "input_ids", "attention_mask" and "labels" tensors.
        Padding positions get attention 0 and label -100 so the LM loss
        ignores them.
    """
    # Assemble <prefix>…<suffix>…<middle>…<eos>. The slices cap each segment
    # (last 512 chars of prefix, first 512 of suffix/mid) before the
    # tokenizer-level truncation. NOTE(review): these are character, not
    # token, limits — confirm that is intended.
    texts = [
        f"{tokenizer.prefix_token}{prefix[-512:]}"
        f"{tokenizer.suffix_token}{data['suffix'][idx][:512]}"
        f"{tokenizer.middle_token}{data['mid'][idx][:512]}"
        f"{tokenizer.eos_token}"
        for idx, prefix in enumerate(data['prefix'])
    ]

    encoded = tokenizer.batch_encode_plus(
        texts,
        max_length=max_length,
        padding="max_length",
        truncation=True,
        return_tensors="pt",
    )
    input_ids = encoded["input_ids"]
    attention_mask = encoded["attention_mask"]

    # Zero out attention on pad tokens. Previously the pad id was
    # hard-coded to 0; prefer the tokenizer's declared pad id (main sets
    # pad_token_id = unk_token_id) and fall back to 0 when absent.
    pad_id = getattr(tokenizer, "pad_token_id", None)
    if pad_id is None:
        pad_id = 0
    attention_mask = torch.where(
        input_ids == pad_id, torch.zeros_like(attention_mask), attention_mask
    )

    labels = input_ids.clone()
    # Bug fix: masked (padding) positions must not contribute to the loss;
    # -100 is the ignore_index used by HF causal-LM cross-entropy.
    labels[attention_mask == 0] = -100

    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "labels": labels,
    }
    
def main():
    """Entry point: build the LoRA-wrapped model, data and Trainer, then train."""
    model_args, dataset_args, training_args = parse_train()

    # Load the base causal LM and wrap it with LoRA adapters for
    # parameter-efficient fine-tuning.
    base_model = AutoModelForCausalLM.from_pretrained(
        model_args['pretrained_model_name_or_path'], trust_remote_code=True
    )
    model = get_peft_model(base_model, lora_set)

    # Experiment tracking.
    wandb.init(project='train_model', name='makeit')

    tokenizer = AutoTokenizer.from_pretrained(model_args['pretrained_model_name_or_path'])
    # Pad with the unk token id — presumably the tokenizer declares no
    # dedicated pad token; TODO(review): confirm unk_token_id is not None.
    tokenizer.pad_token_id = tokenizer.unk_token_id

    # Tokenized train/eval splits, formatted via formator_fn.
    train_dataset, eval_dataset = get_train_eval_data(dataset_args, formator_fn, tokenizer)

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        tokenizer=tokenizer,
        data_collator=collator_fn,
    )
    trainer.train()


if __name__ == '__main__':
    main()