from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments, Trainer, HfArgumentParser
from transformers.trainer_utils import is_main_process
from dataclasses import field, dataclass, asdict
from datasets import load_dataset
from typing import Optional
import torch
import os
from peft import get_peft_model
from lora_config import lora_config
import wandb
torch.manual_seed(2618)  # fix torch's global RNG so training runs are reproducible


@dataclass
class ModelArgumnets:
    """Command-line arguments selecting the base model.

    Parsed by HfArgumentParser in main(). (The class name keeps its original
    spelling because it is referenced elsewhere in this file.)
    """

    # Local path or hub id of the pretrained causal-LM checkpoint.
    pretrained_model_name_or_path: Optional[str] = "/data02/models/CodeLlama-7b-hf"

@dataclass
class Datarguments:
    """Command-line arguments for the data pipeline.

    Parsed by HfArgumentParser in main(); all fields are forwarded to
    data_formator via fn_kwargs. (The class name keeps its original spelling
    because it is referenced elsewhere in this file.)
    """

    # Comma-separated list of training parquet files.
    train_dataset_path: Optional[str] = "test.parquet"
    # Single evaluation parquet file.
    eval_dataset_path: Optional[str] = "eval.parquet"
    # NOTE(review): declared but not applied anywhere visible in this file.
    eval_size: Optional[int] = 256
    # Token budget handed to the tokenizer (pad/truncate to this length).
    max_length: Optional[int] = 512
    # Worker processes for datasets.map().
    num_data_proc: Optional[int] = 16
    # When True, the EOS token is meant to be omitted from formatted samples.
    skip_eos_token: Optional[bool] = False

@dataclass
class MyTrainingArguments(TrainingArguments):
    """Project defaults layered on top of transformers.TrainingArguments.

    Each field below only overrides the upstream default value; every other
    TrainingArguments option is still available from the command line.
    """

    # --- run identity / output ---
    run_name: Optional[str] = "atom"
    output_dir: Optional[str] = "checkpoints/"

    # --- batch size and optimization schedule ---
    per_device_train_batch_size: Optional[int] = 4
    per_device_eval_batch_size: Optional[int] = 4
    num_train_epochs: Optional[int] = 20
    weight_decay: Optional[float] = 0
    learning_rate: Optional[float] = 1e-7
    lr_scheduler_type: Optional[str] = "cosine"
    warmup_ratio: Optional[float] = 0.1

    # --- evaluation ---
    # NOTE(review): `eval_strategy` is the post-4.41 transformers name
    # (older releases call it `evaluation_strategy`) — confirm the pinned
    # transformers version accepts it.
    eval_strategy: Optional[str] = "steps"
    eval_steps: Optional[int] = 100
    load_best_model_at_end: Optional[bool] = True

    # --- logging ---
    logging_strategy: Optional[str] = "steps"
    logging_steps: Optional[int] = 1

    # --- checkpointing ---
    save_strategy: Optional[str] = "steps"
    save_steps: Optional[int] = 100
    save_total_limit: Optional[int] = 10
    save_only_model: Optional[bool] = True

    # --- precision ---
    bf16: Optional[bool] = True

def data_formator(data, tokenizer, max_length, skip_eos_token=False, fim_segment_chars=512, **kwargs):
    """Turn a batch of (prefix, suffix, mid) rows into FIM training tensors.

    Each row is rendered as
    ``<PRE>{prefix tail}<SUF>{suffix head}<MID>{mid head}<EOS>`` and then
    tokenized with padding/truncation to ``max_length``.

    Args:
        data: batched datasets.map() columns with 'prefix', 'suffix', 'mid'.
        tokenizer: tokenizer exposing prefix/suffix/middle/eos tokens and
            ``batch_encode_plus``; its ``pad_token_id`` marks padding.
        max_length: token length every example is padded/truncated to.
        skip_eos_token: when True, omit the EOS token from each example
            (previously this flag was accepted via **kwargs but ignored).
        fim_segment_chars: character budget per FIM segment (was a
            hard-coded 512).
        **kwargs: unused extras forwarded from asdict(data_args).

    Returns:
        dict with 'input_ids', 'attention_mask' and 'labels' tensors; label
        positions that are padding are set to -100 so CrossEntropyLoss
        ignores them (previously loss was computed on padding too).
    """
    eos = "" if skip_eos_token else tokenizer.eos_token
    texts = []
    for idx, prefix in enumerate(data['prefix']):
        texts.append(
            f"{tokenizer.prefix_token}{prefix[-fim_segment_chars:]}"
            f"{tokenizer.suffix_token}{data['suffix'][idx][:fim_segment_chars]}"
            f"{tokenizer.middle_token}{data['mid'][idx][:fim_segment_chars]}"
            f"{eos}"
        )

    result = tokenizer.batch_encode_plus(
        texts, max_length=max_length, padding="max_length", truncation=True, return_tensors="pt"
    )
    input_ids = result["input_ids"]

    # Use the tokenizer's actual pad id instead of a hard-coded 0 (main()
    # points pad_token_id at unk, which happens to be 0 for CodeLlama).
    pad_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else 0
    pad_positions = input_ids == pad_id

    # Zero out attention on padding in case the tokenizer left it set.
    attention_mask = result["attention_mask"]
    attention_mask = torch.where(pad_positions, torch.zeros_like(attention_mask), attention_mask)

    # -100 is the CrossEntropyLoss ignore index: no loss on padding tokens.
    labels = input_ids.clone()
    labels[pad_positions] = -100

    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "labels": labels
    }

def data_collator(data):
    """Stack a list of pre-tokenized examples into batched tensors.

    Args:
        data: list of dicts, each holding equal-length 'input_ids',
            'attention_mask' and 'labels' sequences.

    Returns:
        dict mapping those same three keys to 2-D torch tensors.
    """
    return {
        key: torch.tensor([example[key] for example in data])
        for key in ("input_ids", "attention_mask", "labels")
    }

def _prepare_dataset(data_files, tokenizer, data_args):
    """Load parquet file(s) and tokenize them with data_formator.

    Shared by the train and eval paths (the original duplicated this
    load+map pipeline verbatim for each split).
    """
    dataset = load_dataset('parquet', data_files={'train': data_files})['train']
    return dataset.map(
        data_formator,
        batched=True,
        num_proc=data_args.num_data_proc,
        fn_kwargs={
            "tokenizer": tokenizer,
            **asdict(data_args)
        },
    )

def main():
    """Entry point: parse CLI args, wrap the base model with LoRA adapters,
    tokenize the parquet datasets and run Hugging Face Trainer training."""
    parser = HfArgumentParser((ModelArgumnets, Datarguments, MyTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    model = AutoModelForCausalLM.from_pretrained(model_args.pretrained_model_name_or_path, trust_remote_code=True)
    tokenizer = AutoTokenizer.from_pretrained(model_args.pretrained_model_name_or_path)
    # The base tokenizer has no pad token; reuse <unk> so padding has an id.
    tokenizer.pad_token_id = tokenizer.unk_token_id

    model = get_peft_model(model, lora_config)

    # Only the rank-0 process logs to wandb / prints the parameter summary.
    local_rank = int(os.environ.get("LOCAL_RANK", -1))
    if is_main_process(local_rank):
        wandb.init(project='atom', name=training_args.run_name)
        model.print_trainable_parameters()

    # train path supports a comma-separated list of files; eval is one file.
    train_dataset = _prepare_dataset(data_args.train_dataset_path.split(','), tokenizer, data_args)
    eval_dataset = _prepare_dataset(data_args.eval_dataset_path, tokenizer, data_args)
    # NOTE(review): data_args.eval_size is never applied — confirm whether the
    # eval set should be truncated to that many rows.

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    trainer.train()

# Standard script entry point: run training only when executed directly.
if __name__ == '__main__':
    main()
