import os 
# os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:128"
# Pin the visible GPUs before torch is imported so device ordering is fixed.
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
import torch, gc
# Free any cached allocations left over in this process before loading the model.
gc.collect()
torch.cuda.empty_cache()
import deepspeed
# DS_CONFIG = "ds_zero2_no_offload.json"
# DeepSpeed config file (filename suggests ZeRO-3 with offload).
DS_CONFIG = "ds_z3_offload_config.json"

from datasets import Dataset
from modelscope import snapshot_download, AutoTokenizer
from swanlab.integration.transformers import SwanLabCallback
from qwen_vl_utils import process_vision_info
from peft import LoraConfig, TaskType, get_peft_model, PeftModel,get_peft_model_state_dict
from transformers import (
    TrainingArguments,
    Trainer,
    DataCollatorForSeq2Seq,
    Qwen2_5_VLForConditionalGeneration,
    AutoProcessor,
)
import swanlab
import json



def process_func_batch(examples):
    """Tokenize a batch of instruction/input/output examples for causal-LM
    fine-tuning.

    Builds a chat-template prompt from ``instruction`` (system turn) and
    ``input`` (user turn), tokenizes the target ``output`` separately, masks
    the prompt portion of the labels with -100, truncates so that
    prompt + response + one trailing pad token never exceed MAX_LENGTH, and
    right-pads every sequence to exactly MAX_LENGTH.

    Relies on the module-level ``processor`` and ``tokenizer`` objects.

    Args:
        examples: dict of equal-length lists with keys "instruction",
            "input", "output" (the ``datasets.map(batched=True)`` format).

    Returns:
        dict with "input_ids", "attention_mask", "labels" tensors, each of
        shape (batch, MAX_LENGTH).
    """
    MAX_LENGTH = 2048
    input_ids, attention_mask, labels = [], [], []

    # Iterate over every sample in the batch.
    for instruction, input_text, output_text in zip(
        examples["instruction"], examples["input"], examples["output"]
    ):
        # Build the chat messages: instruction as the system turn, the
        # question text as the user turn.
        messages = [
            {
                "role": "system",
                "content": [
                    {"type": "text", "text": instruction},
                ],
            },
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": f'{input_text}'}
                ]
            }
        ]

        # Text-only samples: no images or videos are fed to the processor.
        image_inputs = None
        video_inputs = None

        # Render the prompt; add_generation_prompt=True appends the
        # assistant header so the model learns to continue after it.
        text = processor.apply_chat_template(
            messages, tokenize=False, add_generation_prompt=True
        )

        # Tokenize the prompt (padding is handled manually below).
        inputs = processor(
            text=[text],
            images=image_inputs,
            videos=video_inputs,
            padding=False,
            return_tensors="pt",
        )

        # Only input_ids / attention_mask are needed; avoid converting every
        # tensor in the processor output to Python lists.
        instruction_input_ids = inputs["input_ids"][0].tolist()
        instruction_attention_mask = inputs["attention_mask"][0].tolist()

        # Tokenize the target answer without special tokens.
        response = tokenizer(f"{output_text}", add_special_tokens=False)
        response_input_ids = response['input_ids']
        response_attention_mask = response['attention_mask']

        # Room left for the response, reserving one slot for the trailing
        # pad token that terminates the sequence.
        remaining_length = MAX_LENGTH - len(instruction_input_ids) - 1

        if remaining_length < 0:
            # The prompt alone exceeds the budget: truncate the prompt and
            # leave no room for the response.
            truncation_length = len(instruction_input_ids) + remaining_length
            instruction_input_ids = instruction_input_ids[:truncation_length]
            instruction_attention_mask = instruction_attention_mask[:truncation_length]
            remaining_length = 0

        # Prompt + (possibly truncated) response + one pad token acting as
        # the end-of-sequence marker.
        current_input_ids = (
            instruction_input_ids + response_input_ids[:remaining_length] + [tokenizer.pad_token_id]
        )
        current_attention_mask = (
            instruction_attention_mask + response_attention_mask[:remaining_length] + [1]
        )
        # Mask the prompt tokens with -100 so loss is computed only on the
        # response (plus the terminating pad token).
        current_labels = (
            [-100] * len(instruction_input_ids) +
            response_input_ids[:remaining_length] +
            [tokenizer.pad_token_id]
        )

        # Right-pad all three sequences (same length here) to MAX_LENGTH.
        if len(current_input_ids) < MAX_LENGTH:
            pad_len = MAX_LENGTH - len(current_input_ids)
            current_input_ids += [tokenizer.pad_token_id] * pad_len
            current_attention_mask += [0] * pad_len
            current_labels += [-100] * pad_len

        input_ids.append(current_input_ids)
        attention_mask.append(current_attention_mask)
        labels.append(current_labels)

    # Stack the Python lists into batch tensors.
    return {
        "input_ids": torch.tensor(input_ids),
        "attention_mask": torch.tensor(attention_mask),
        "labels": torch.tensor(labels),
    }



def predict(messages, model):
    """Run one chat-completion inference and return the decoded response.

    Args:
        messages: chat messages in the Qwen-VL format (list of role/content
            dicts; may contain image/video entries).
        model: the (PEFT-wrapped) generation model.

    Returns:
        str: the generated assistant text for the first (only) sequence,
        with special tokens stripped.

    NOTE(review): ``do_sample=True`` makes the output non-deterministic.
    """
    # Render the prompt and extract any vision inputs from the messages.
    text = processor.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    image_inputs, video_inputs = process_vision_info(messages)

    inputs = processor(
        text=[text],
        images=image_inputs,
        videos=video_inputs,
        padding=True,
        return_tensors="pt",
    )

    # Move every tensor onto the model's device in one call instead of
    # mutating the dict while iterating over it (the original anti-pattern).
    device = next(model.parameters()).device
    inputs = inputs.to(device)

    # Generate the continuation.
    generated_ids = model.generate(**inputs, max_new_tokens=256, do_sample=True)
    # Strip the prompt tokens so only the newly generated part is decoded.
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    del inputs

    return output_text[0]


# Download the Qwen2-VL model from modelscope to a local directory
# model_dir = snapshot_download("Qwen/Qwen2-VL-2B-Instruct", cache_dir="./", revision="master")

# Load model weights with Transformers
tokenizer = AutoTokenizer.from_pretrained("/root/autodl-tmp/Qwen/Qwen2.5-VL-7B-Instruct/", use_fast=True)
# Bounds on the number of image pixels the processor will accept.
min_pixels = 256*28*28
max_pixels = 1280*28*28
processor = AutoProcessor.from_pretrained("/root/autodl-tmp/Qwen/Qwen2.5-VL-7B-Instruct", min_pixels=min_pixels, max_pixels=max_pixels,use_fast=True)
# One model replica per local rank (distributed launchers set LOCAL_RANK per process).
device_map = {"": int(os.environ.get("LOCAL_RANK") or 0)}   
model = Qwen2_5_VLForConditionalGeneration.from_pretrained("/root/autodl-tmp/Qwen/Qwen2.5-VL-7B-Instruct/", device_map=device_map, torch_dtype=torch.bfloat16)

model.enable_input_require_grads()  # required when gradient checkpointing is enabled

# Load the datasets: read the JSON files
# Load the train / val / test splits separately and tokenize each with process_func_batch
train_ds = Dataset.from_json("train_set.json")
train_dataset = train_ds.map(process_func_batch, batched=True)
val_ds = Dataset.from_json("val_set.json")
val_dataset = val_ds.map(process_func_batch, batched=True)
test_ds = Dataset.from_json("test_set.json")
test_dataset = test_ds.map(process_func_batch, batched=True)





# LoRA configuration
config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],
    inference_mode=False,  # training mode
    r=32,  # LoRA rank
    lora_alpha=16,  # LoRA alpha scaling factor
    lora_dropout=0.05,  # dropout probability on the LoRA layers
    bias="none",
)

# Wrap the base model with the LoRA adapters
peft_model = get_peft_model(model, config)
# KV-cache is incompatible with gradient checkpointing during training.
peft_model.config.use_cache = False



# Training hyper-parameters (DeepSpeed-driven LoRA fine-tuning).
args = TrainingArguments(
    output_dir="./output/Qwen2.5-VL-7B",
    per_device_train_batch_size=2,
    gradient_accumulation_steps=8,  # effective batch = 2 * 8 * n_processes
    logging_steps=10,
    logging_first_step=True,  # parameter is a bool; was 10 (truthy but wrong type)
    num_train_epochs=3,
    save_steps=30,
    learning_rate=1e-4,
    save_on_each_node=True,
    gradient_checkpointing=True,
    report_to="none",  # SwanLab logging goes through the explicit callback instead
    # bf16=True,
    # NOTE(review): the model is loaded with torch_dtype=torch.bfloat16 but
    # training runs with fp16=True — confirm this mix is intended, or use bf16.
    fp16=True,
    max_grad_norm=1.0,
    deepspeed=DS_CONFIG,
    # evaluation_strategy="steps",  # evaluate every eval_steps
    # eval_steps=20,
)
        
# SwanLab experiment-tracking callback; the config dict is logged metadata only.
swanlab_callback = SwanLabCallback(
    project="Qwen2.5-VL-finetune",
    experiment_name="qwen2.5-vl",
    config={
        "model": "https://modelscope.cn/models/Qwen/Qwen2.5-VL-7B-Instruct",
        "dataset": "https://huggingface.co/datasets/Shekswess/medical_llama3_instruct_dataset_short",
        "github": "https://github.com/datawhalechina/self-llm",
        "prompt": "Answer the question truthfully, you are a medical professional",
        "train_data_number": len(train_dataset),
        "lora_rank": 32,
        "lora_alpha": 16,
        "lora_dropout": 0.05,  # was logged as 0.1, inconsistent with the actual LoraConfig value
    },
)

# Trainer wiring: LoRA model, DeepSpeed-enabled args, and the tracking callback.
trainer = Trainer(
    model=peft_model,
    args=args,
    train_dataset=train_dataset,
    eval_dataset=val_dataset,
    data_collator=DataCollatorForSeq2Seq(tokenizer=tokenizer, padding=True),
    callbacks=[swanlab_callback],
)


# Start model training.
# NOTE(review): resume_from_checkpoint=True raises if no checkpoint exists in
# output_dir — confirm a checkpoint is present before the first run.
trainer.train(resume_from_checkpoint=True)

# Use torch.inference_mode()
# with torch.inference_mode():  # more efficient inference mode
#     peft_model.eval()  # set the model to evaluation mode
#     eval_results = trainer.evaluate(eval_dataset=val_dataset)
#     print("Validation evaluation results:", eval_results)
trainer.save_model('./output/Qwen2.5-VL-7B')
trainer.save_state()


# ==================== Test mode ===================
# LoRA configuration for evaluation
val_config = LoraConfig(  
    task_type=TaskType.CAUSAL_LM,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],
    inference_mode=True,  # inference mode (original comment wrongly said "training mode")
    r=32,  # LoRA rank
    lora_alpha=16,  # LoRA alpha scaling factor
    lora_dropout=0.05,  # dropout probability (inactive at inference)
    bias="none",
)


# Load the trained LoRA adapters on top of the base model for evaluation
val_peft_model = PeftModel.from_pretrained(model, model_id="./output/Qwen2.5-VL-7B", config=val_config)


# Collect all information that needs to be saved
results_to_save = []

# Also build test_image_list for SwanLab logging
test_image_list = []

for item in test_dataset:
    # Build the input messages (system instruction + user question)
    messages = [
        {
                "role": "system",
                "content": [
                    {"type": "text", "text": item['instruction']},
                ],
        },
        {
        "role": "user", 
        "content": [
            {
                "type": "text",
                "text": item['input']
            }
        ]}]
    
    # Get the model prediction
    response = predict(messages, val_peft_model)
    messages.append({"role": "assistant", "content": f"{response}"})
    
    # Print / record the prediction
    print(messages[-1])

    # Append the prediction, question, and reference answer to the results
    results_to_save.append({
        'instruction':item['instruction'],
        'question':item['input'],
        'original_answer': item['output'],
        'predicted_answer': response,
    })

    # Also append to test_image_list for SwanLab logging
    # test_image_list.append(swanlab.Image(item['image_path'], caption=response))
    test_image_list.append({
        "input": item['input'],
        "response": response,
    })

# Path of the JSON file the predictions are written to
output_file_path = './predictions_results.json'

# Write the results to the JSON file
with open(output_file_path, 'w', encoding='utf-8') as file:
    json.dump(results_to_save, file, ensure_ascii=False, indent=4)

print(f"Results have been saved to {output_file_path}")
# NOTE(review): SwanLabCallback already started a run during training —
# confirm this second swanlab.init() is intended.
swanlab.init()
# Log the prediction results with SwanLab
swanlab.log({"Prediction": test_image_list})

# When running in a Jupyter Notebook, call swanlab.finish() to stop SwanLab logging
swanlab.finish()
