import io
import os

# Environment must be configured BEFORE torch / HuggingFace libraries are imported,
# otherwise CUDA device masking and the HF mirror endpoint are not picked up.
# os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:128"
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"

import gc

import torch

gc.collect()
torch.cuda.empty_cache()

import deepspeed
from datasets import Value, features, load_dataset
from datasets.features import Image  # Image feature type for dataset schemas
from PIL import Image as PILImage  # for actual image decoding/manipulation
from peft import (
    LoraConfig,
    PeftModel,
    TaskType,
    get_peft_model,
    get_peft_model_state_dict,
)
from transformers import (
    AutoModelForVision2Seq,
    AutoProcessor,
    Qwen2_5_VLForConditionalGeneration,
)
from trl import DPOConfig, DPOTrainer

DS_CONFIG = "ds_zero2_no_offload.json"
# DS_CONFIG = "ds_z3_offload_config.json"


def main():
    """LoRA DPO fine-tuning of Qwen2.5-VL-7B-Instruct on a subset of RLAIF-V.

    Loads the base model onto this process's local GPU, formats the
    preference dataset into the flat (images, prompt, chosen, rejected)
    schema that ``DPOTrainer`` expects, wraps the model with a LoRA
    adapter, and trains with DeepSpeed.
    """
    # One process per rank under DeepSpeed: place the whole model on the
    # GPU matching this process's LOCAL_RANK (defaults to 0 for single-GPU runs).
    device_map = {"": int(os.environ.get("LOCAL_RANK") or 0)}
    model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
        "/root/autodl-tmp/Qwen/Qwen2.5-VL-7B-Instruct",
        torch_dtype=torch.bfloat16,
        attn_implementation="flash_attention_2",
        device_map=device_map,
    )

    # Required when gradient checkpointing is enabled, otherwise inputs have
    # requires_grad=False and the LoRA parameters receive no gradients.
    model.enable_input_require_grads()
    processor = AutoProcessor.from_pretrained(
        "/root/autodl-tmp/Qwen/Qwen2.5-VL-7B-Instruct",
        do_image_splitting=False,
        use_fast=True,
    )

    # Small 1000-example subset of the preference dataset for this run.
    dataset = load_dataset("trl-lib/rlaif-v", split="train", cache_dir="./data").select(range(1000))

    # Renamed from `format` to avoid shadowing the builtin of the same name.
    def format_example(example):
        """Normalize one preference example for DPOTrainer.

        Returns a dict with a flat list of PIL images plus the prompt /
        chosen / rejected conversations rendered as chat-template strings.
        """
        images = example["images"]
        if not isinstance(images, list):
            images = [images]  # single image -> one-element list

        # Decode each entry into a PIL image, whatever its stored form.
        processed_images = []
        for img in images:
            if isinstance(img, dict) and "bytes" in img:
                # HF datasets image dict with raw encoded bytes.
                img = PILImage.open(io.BytesIO(img["bytes"]))
            elif isinstance(img, str):
                # Filesystem path.
                img = PILImage.open(img)
            processed_images.append(img)

        prompt = processor.apply_chat_template(example["prompt"], tokenize=False)
        chosen = processor.apply_chat_template(example["chosen"], tokenize=False)
        rejected = processor.apply_chat_template(example["rejected"], tokenize=False)

        return {
            "images": processed_images,  # flat list of PIL images
            "prompt": prompt,
            "chosen": chosen,
            "rejected": rejected,
        }

    dataset = dataset.map(format_example, remove_columns=dataset.column_names, num_proc=4)

    # NOTE(review): casting to the dataset's own current features is a no-op.
    # If the images column loses its Image feature after map(), re-enable:
    #   f["images"] = features.Sequence(features.Image(decode=True))
    f = dataset.features
    dataset = dataset.cast(f)

    # LoRA over all attention and MLP projection layers.
    config = LoraConfig(
        task_type=TaskType.CAUSAL_LM,
        target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],
        inference_mode=False,  # training mode
        r=32,  # LoRA rank
        lora_alpha=16,  # LoRA scaling factor
        lora_dropout=0.05,
        bias="none",
    )

    peft_model = get_peft_model(model, config)
    # KV cache is incompatible with gradient checkpointing during training.
    peft_model.config.use_cache = False
    # Gradient checkpointing is enabled by DPOConfig(gradient_checkpointing=True)
    # below; the former private `_set_gradient_checkpointing()` call was redundant
    # and breaks on transformers versions where that method requires arguments.

    training_args = DPOConfig(
        output_dir="./output/Qwen2.5-VL-DPO",
        bf16=True,
        gradient_checkpointing=True,
        per_device_train_batch_size=2,
        gradient_accumulation_steps=8,
        num_train_epochs=3,
        logging_steps=5,
        save_steps=100,
        learning_rate=1e-4,
        logging_first_step=True,  # boolean flag (was mistakenly set to 5)
        deepspeed=DS_CONFIG,
        max_grad_norm=1.0,
        beta=0.8,  # DPO KL-penalty temperature
    )
    trainer = DPOTrainer(
        peft_model,
        ref_model=None,  # not needed with PEFT: adapters are disabled to get reference logits
        args=training_args,
        train_dataset=dataset,
        processing_class=processor,
    )

    trainer.train()

if __name__ == "__main__":
    main()