import wandb
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from train import st_train
from dataclasses import asdict
from datasets import load_dataset
from peft import get_peft_model
from hf_args import lora_config, train_parser

def _format_fim_batch(data, tokenizer, max_length, **kwargs):
    """Tokenize a batch of (prefix, suffix, mid) rows into FIM training examples.

    Builds one fill-in-the-middle string per row using the tokenizer's FIM
    sentinel tokens, truncating each part to 512 characters (prefix keeps its
    tail, suffix/mid keep their head), then encodes to fixed-length tensors.

    Args:
        data: batch dict from ``datasets.Dataset.map(batched=True)`` with
            ``'prefix'``, ``'suffix'`` and ``'mid'`` string columns.
        tokenizer: a tokenizer exposing ``prefix_token`` / ``suffix_token`` /
            ``middle_token`` sentinels and a configured ``pad_token_id``.
        max_length: fixed sequence length used for padding and truncation.
        **kwargs: unused extras spilled in from ``fn_kwargs`` (the full
            data-args dict is forwarded wholesale).

    Returns:
        Dict with ``input_ids``, ``attention_mask`` and ``labels`` tensors;
        labels carry ``-100`` at masked positions so the LM loss ignores them.
    """
    texts = [
        f"{tokenizer.prefix_token}{prefix[-512:]}{tokenizer.suffix_token}{suffix[:512]}{tokenizer.middle_token}{mid[:512]}{tokenizer.eos_token}"
        for prefix, suffix, mid in zip(data['prefix'], data['suffix'], data['mid'])
    ]
    encoded = tokenizer.batch_encode_plus(
        texts,
        max_length=max_length,
        padding="max_length",
        truncation=True,
        return_tensors="pt",
    )
    input_ids = encoded["input_ids"]
    # Zero the mask wherever the pad token appears. The original compared
    # against a hard-coded 0, which silently assumes pad_token_id == 0;
    # main() sets pad_token_id = unk_token_id, so use the configured id.
    attention_mask = torch.where(
        input_ids == tokenizer.pad_token_id,
        torch.zeros_like(encoded["attention_mask"]),
        encoded["attention_mask"],
    )
    labels = input_ids.clone()
    # Ignore masked positions in the loss (-100 is CrossEntropyLoss's
    # ignore_index); otherwise padding dominates the training signal.
    labels[attention_mask == 0] = -100
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "labels": labels,
    }


def _collate_batch(features):
    """Stack a list of per-example feature dicts back into batch tensors.

    ``datasets`` stores mapped tensors as plain lists, so the collator
    rebuilds ``torch`` tensors for the trainer.
    """
    return {
        key: torch.tensor([item[key] for item in features])
        for key in ("input_ids", "attention_mask", "labels")
    }


def main():
    """Fine-tune a causal LM with LoRA on fill-in-the-middle (FIM) data.

    Parses model/data/training arguments, prepares train/eval datasets from
    parquet files, wraps the base model with PEFT LoRA adapters, and hands
    everything to ``st_train``. Logs the run to Weights & Biases.
    """
    # init params
    model_args, data_args, training_args = train_parser()
    data_dict = asdict(data_args)
    wandb.init(project='codefill', name=training_args.run_name)

    # get model with LoRA adapters applied
    model = AutoModelForCausalLM.from_pretrained(model_args.model_path, trust_remote_code=True)
    model = get_peft_model(model, lora_config)

    # get tokenizer; reuse <unk> as the pad token so "max_length" padding works
    tokenizer = AutoTokenizer.from_pretrained(model_args.model_path)
    tokenizer.pad_token_id = tokenizer.unk_token_id

    # get train/eval data (train path may be a comma-separated list of files)
    train_dataset = load_dataset('parquet', data_files={'train': data_args.train_dataset_path.split(',')})['train']
    eval_dataset = load_dataset('parquet', data_files={'train': data_args.eval_dataset_path})['train']

    # tokenize; data_dict supplies max_length (extra keys are ignored by the helper)
    map_kwargs = {"tokenizer": tokenizer, **data_dict}
    train_dataset = train_dataset.map(
        _format_fim_batch, batched=True, num_proc=data_args.num_data_proc, fn_kwargs=map_kwargs
    )
    eval_dataset = eval_dataset.map(
        _format_fim_batch, batched=True, num_proc=data_args.num_data_proc, fn_kwargs=map_kwargs
    )

    # start train
    st_train(model, training_args, train_dataset, eval_dataset, tokenizer, _collate_batch)

# Run training only when executed as a script, not on import.
if __name__ == '__main__':
    main()
