
from transformers import AutoTokenizer , Trainer
from transformers import AutoModelForCausalLM,TrainingArguments
from peft import LoraConfig, TaskType
from peft import get_peft_model
import torch

def lora_cret(r=8, target_modules=None, task_type=TaskType.CAUSAL_LM, lora_alpha=16, lora_dropout=0.05):
    """
    Create a LoraConfig with the specified hyperparameters.

    Args:
        r (int): Rank of the LoRA update matrices.
        target_modules (list | None): Module names to wrap with LoRA layers.
            Defaults to ["q_proj", "v_proj"] when None.
        task_type (TaskType): PEFT task type, e.g. TaskType.CAUSAL_LM.
        lora_alpha (int): Scaling factor for the LoRA updates.
        lora_dropout (float): Dropout probability applied to the LoRA layers.

    Returns:
        LoraConfig: The configured LoraConfig object.
    """
    # Avoid a mutable default argument (shared list across calls); resolve
    # the default here instead.
    if target_modules is None:
        target_modules = ["q_proj", "v_proj"]
    return LoraConfig(
        r=r,
        target_modules=target_modules,
        task_type=task_type,
        lora_alpha=lora_alpha,
        lora_dropout=lora_dropout,
    )



def init_model(model_arg):
    """Load a causal-LM checkpoint and wrap it with LoRA adapters.

    Args:
        model_arg: Object exposing ``pretrained_model_name_or_path``.

    Returns:
        The PEFT-wrapped model.
    """
    base_model = AutoModelForCausalLM.from_pretrained(
        model_arg.pretrained_model_name_or_path, trust_remote_code=True
    )
    return get_peft_model(base_model, lora_cret())

def init_tok(model_arg):
    """
    Load the tokenizer and ensure it has a pad token.

    The pad token is aliased to the EOS token (causal-LM tokenizers often
    ship without one); in HF tokenizers ``pad_token_id`` is derived from
    ``pad_token``, so it stays consistent automatically.

    Args:
        model_arg: Object exposing ``pretrained_model_name_or_path``.

    Returns:
        The configured tokenizer.
    """
    tok = AutoTokenizer.from_pretrained(model_arg.pretrained_model_name_or_path)
    # The original intermediate assignment pad_token_id = unk_token_id was
    # immediately overwritten by the eos aliasing below, and the no-op
    # ``tok.eos_token = tok.eos_token`` line has been dropped.
    # NOTE(review): process_tokens() later reads tokenizer.prefix_token /
    # suffix_token / middle_token, which were only set in commented-out code
    # here — confirm the loaded tokenizer actually defines these FIM tokens.
    tok.pad_token = tok.eos_token
    return tok


def init_model_tok(model_arg):
    """
    Load the model and the tokenizer in one call.

    Args:
        model_arg: Object exposing ``pretrained_model_name_or_path``.

    Returns:
        tuple: (PEFT-wrapped causal LM, tokenizer with pad_token_id set).
    """
    # Reuse init_model instead of duplicating the load + LoRA-wrap steps.
    mod = init_model(model_arg)
    tokenizer = AutoTokenizer.from_pretrained(model_arg.pretrained_model_name_or_path)
    # NOTE(review): this pads with unk while init_tok pads with eos —
    # confirm which is intended and unify the two helpers.
    tokenizer.pad_token_id = tokenizer.unk_token_id
    return mod, tokenizer

# Build the Trainer.
def init_rainer(model, train_arg: TrainingArguments, train_dataset, eval_dataset, tokenizer) -> Trainer:
    """Assemble a HF Trainer with a minimal tensor-stacking data collator."""

    def _collate(features):
        # Stack each sample's pre-tokenized lists into batch tensors.
        batch = {}
        for key in ("input_ids", "attention_mask", "labels"):
            batch[key] = torch.tensor([f[key] for f in features])
        return batch

    return Trainer(
        model=model,
        args=train_arg,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        tokenizer=tokenizer,
        data_collator=_collate,
    )



def process_tokens(data, tokenizer, max_length):
    """
    Format each sample into one fill-in-the-middle string:
    prefix_token + tail of prefix + suffix_token + head of suffix
    + middle_token + head of mid + eos_token, each piece clipped to 512 chars.

    Note: ``max_length`` is accepted for interface parity but unused here;
    length control happens later in the encode step.
    """
    suffixes = data['suffix']
    mids = data['mid']
    formatted = []
    for i, pre in enumerate(data['prefix']):
        formatted.append(
            f"{tokenizer.prefix_token}{pre[-512:]}{tokenizer.suffix_token}"
            f"{suffixes[i][:512]}{tokenizer.middle_token}{mids[i][:512]}"
            f"{tokenizer.eos_token}"
        )
    return formatted

def encode_data(data_, tokenizer, max_length):
    """
    Batch-encode the formatted strings, padding/truncating every sample to
    ``max_length`` and returning PyTorch tensors.
    """
    encode_kwargs = {
        "max_length": max_length,
        "padding": "max_length",
        "truncation": True,
        "return_tensors": "pt",
    }
    return tokenizer.batch_encode_plus(data_, **encode_kwargs)

def create_attention_mask(input_ids, attention_mask):
    """
    Zero out the attention mask at every position whose token id is 0.

    NOTE(review): this treats id 0 as padding, but the tokenizer setup above
    pads with eos/unk — confirm that 0 is actually the pad id here.
    """
    pad_positions = input_ids == 0
    return attention_mask.masked_fill(pad_positions, 0)

def process_data(data, tokenizer, max_length, **kwargs):
    """
    End-to-end sample preparation: build fill-in-the-middle strings,
    batch-encode them, fix up the attention mask, and clone input_ids
    as the labels.

    Returns:
        dict: "input_ids", "attention_mask", "labels" tensors.
    """
    # Concatenate prefix/suffix/mid into formatted strings, then encode.
    encoded = encode_data(
        process_tokens(data, tokenizer, max_length), tokenizer, max_length
    )

    input_ids = encoded["input_ids"]
    # Zero attention wherever the token id is 0 (see create_attention_mask).
    attention_mask = create_attention_mask(input_ids, encoded["attention_mask"])

    # Causal-LM labels are a copy of the inputs.
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "labels": input_ids.clone(),
    }