# -*- coding: utf-8 -*-
# @Time    : 2025/2/14 19:28
# @Author  : 
# @File    : utils.py
# @Software: PyCharm 
# @Comment :

import os

import torch
from torch.distributed.tensor.parallel import ddp
import torch.distributed as dist

def qwen_format(example, tokenizer, max_length=384):
    """
    Preprocess one dataset example into Qwen chat-format training features.

    Builds the prompt using Qwen's ChatML markers
    (``<|im_start|>...<|im_end|>``), tokenizes the prompt and the response
    separately, and masks the prompt portion of the labels with -100 so the
    loss is computed only on the response tokens.

    Args:
        example: dict with string fields "instruction", "input", "output".
        tokenizer: HuggingFace-style tokenizer; called with
            ``add_special_tokens=False`` and must expose ``pad_token_id``.
        max_length: truncation length for the concatenated sequence.
            Defaults to 384 (previously hard-coded).

    Returns:
        dict with "input_ids", "attention_mask" and "labels" — three lists
        of equal length, at most ``max_length`` tokens.
    """
    instruction = tokenizer(
        f"<|im_start|>system\n{example['instruction']}<|im_end|>\n<|im_start|>user\n{example['input']}<|im_end|>\n<|im_start|>assistant\n",
        add_special_tokens=False,
    )
    response = tokenizer(f"{example['output']}", add_special_tokens=False)

    # Append pad_token_id as the terminating token (for Qwen the pad token
    # doubles as <|endoftext|>); it is included in the labels so the model
    # learns to stop generating.
    input_ids = (
            instruction["input_ids"] + response["input_ids"] + [tokenizer.pad_token_id]
    )
    attention_mask = instruction["attention_mask"] + response["attention_mask"] + [1]
    # Prompt tokens get label -100 so CrossEntropyLoss ignores them.
    labels = (
            [-100] * len(instruction["input_ids"])
            + response["input_ids"]
            + [tokenizer.pad_token_id]
    )
    # Truncate over-long sequences. NOTE(review): truncation can drop the
    # trailing end-of-text token — acceptable for training, but be aware.
    if len(input_ids) > max_length:
        input_ids = input_ids[:max_length]
        attention_mask = attention_mask[:max_length]
        labels = labels[:max_length]

    return {"input_ids": input_ids, "attention_mask": attention_mask, "labels": labels}

def init_distributed_mode():
    """
    Initialize torch.distributed (NCCL backend) for DDP training.

    Detects a distributed launch via the RANK environment variable, which
    torchrun / torch.distributed.launch always set. When not launched in
    distributed mode, does nothing and returns None.

    Returns:
        Tuple ``(ddp_rank, ddp_local_rank, ddp_world_size, device)`` where
        ``device`` is the ``"cuda:<local_rank>"`` string bound via
        ``torch.cuda.set_device``, or None when not running under a
        distributed launcher.
    """
    # NOTE(review): the original gated on a nonexistent
    # `torch.distributed.tensor.parallel.ddp` import (that module exports
    # parallel styles, not a `ddp` flag) — the top-of-file import should be
    # removed. Detect distributed mode from the environment instead:
    # a launcher always sets RANK; its absence means single-process training.
    ddp = int(os.environ.get("RANK", -1)) != -1
    if not ddp:
        return None

    dist.init_process_group(backend="nccl")
    ddp_rank = int(os.environ["RANK"])
    ddp_local_rank = int(os.environ["LOCAL_RANK"])
    ddp_world_size = int(os.environ["WORLD_SIZE"])
    device = f"cuda:{ddp_local_rank}"
    torch.cuda.set_device(device)

    return ddp_rank, ddp_local_rank, ddp_world_size, device