from peft import LoraConfig, TaskType
import torch
from dataclasses import asdict
from datasets import load_dataset

# LoRA adapter configuration for causal-LM fine-tuning: rank-8 update
# matrices (alpha 16, 5% dropout) on the attention query/value projections.
lora_config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    target_modules=["q_proj", "v_proj"],
    r=8,
    lora_alpha=16,
    lora_dropout=0.05,
)

def data_formator(data, tokenizer, max_length, **kwargs):
    """Build fill-in-the-middle (FIM) training examples and tokenize them.

    Each example is assembled as
    ``{prefix_token}{prefix}{suffix_token}{suffix}{middle_token}{mid}{eos_token}``
    with the prefix truncated to its last 512 chars and suffix/mid to their
    first 512 chars.

    Args:
        data: batch dict with parallel lists under 'prefix', 'suffix', 'mid'.
        tokenizer: tokenizer exposing FIM special-token attributes and
            ``batch_encode_plus``.
        max_length: fixed sequence length; every example is padded/truncated
            to exactly this many tokens.
        **kwargs: extra dataset-arg fields forwarded by ``map``; unused here.

    Returns:
        dict with ``input_ids``, ``attention_mask`` and ``labels`` tensors.
        Padded label positions are set to -100 so the LM loss ignores them.
    """
    texts = [
        f"{tokenizer.prefix_token}{prefix[-512:]}{tokenizer.suffix_token}"
        f"{data['suffix'][idx][:512]}{tokenizer.middle_token}"
        f"{data['mid'][idx][:512]}{tokenizer.eos_token}"
        for idx, prefix in enumerate(data['prefix'])
    ]

    result = tokenizer.batch_encode_plus(
        texts,
        max_length=max_length,
        padding="max_length",
        truncation=True,
        return_tensors="pt",
    )
    input_ids = result["input_ids"]

    # NOTE(review): assumes the pad token id is 0 — confirm against the
    # tokenizer, since this also zeroes the mask for a legitimate token id 0.
    attention_mask = result["attention_mask"]
    attention_mask = torch.where(input_ids == 0, torch.zeros_like(attention_mask), attention_mask)

    # Fix: mask padded positions with -100 (CrossEntropyLoss ignore_index).
    # The original used raw input_ids as labels, training the model to
    # predict pad tokens.
    labels = input_ids.clone()
    labels[attention_mask == 0] = -100

    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "labels": labels,
    }

def map_dataset(train_dataset, eval_dataset, data_args, tokenizer):
    """Tokenize the train and eval splits with ``data_formator``.

    Args:
        train_dataset: training split (``datasets.Dataset``).
        eval_dataset: evaluation split (``datasets.Dataset``).
        data_args: dataclass of data settings; all fields are forwarded to
            ``data_formator`` and ``num_data_proc`` controls map parallelism.
        tokenizer: tokenizer passed through to ``data_formator``.

    Returns:
        Tuple of (mapped train dataset, mapped eval dataset).
    """
    fn_kwargs = {
        "tokenizer": tokenizer,
        **asdict(data_args),
    }

    def _tokenize(dataset):
        # Single shared mapping call so the train/eval pipelines cannot
        # drift apart (the original duplicated this call verbatim).
        return dataset.map(
            data_formator,
            batched=True,
            num_proc=data_args.num_data_proc,
            fn_kwargs=fn_kwargs,
        )

    return _tokenize(train_dataset), _tokenize(eval_dataset)

def data_collator(data):
    """Stack per-example feature lists into batched tensors.

    Args:
        data: list of example dicts, each holding equal-length lists under
            'input_ids', 'attention_mask' and 'labels'.

    Returns:
        dict mapping those same three keys to batched ``torch.Tensor``s.
    """
    feature_names = ("input_ids", "attention_mask", "labels")
    return {
        name: torch.tensor([example[name] for example in data])
        for name in feature_names
    }

def load_dataset_tokenizer(data_args, tokenizer):
    """Load the parquet train/eval splits and tokenize them.

    Args:
        data_args: data settings; ``train_dataset_path`` may be a
            comma-separated list of parquet files, ``eval_dataset_path`` a
            single path.
        tokenizer: tokenizer forwarded to ``map_dataset``.

    Returns:
        Tuple of (tokenized train dataset, tokenized eval dataset).
    """
    train_files = data_args.train_dataset_path.split(',')
    raw_train = load_dataset('parquet', data_files={'train': train_files})['train']
    raw_eval = load_dataset('parquet', data_files={'train': data_args.eval_dataset_path})['train']

    return map_dataset(raw_train, raw_eval, data_args, tokenizer)