# -------------------------------
#        微调模型
# -------------------------------
# scripts/train_lora.py
import os

os.environ["TOKENIZERS_PARALLELISM"] = "false"

import torch
from transformers import (
    AutoProcessor,
    AutoModelForVision2Seq,
    TrainingArguments,
    Trainer,
    DataCollatorForSeq2Seq
)
from peft import LoraConfig, get_peft_model
from datasets import load_dataset
from PIL import Image
import warnings

warnings.filterwarnings("ignore")
import torch

# -------------------------------
# 1. Configuration
# -------------------------------
model_id = "./models/Qwen2.5-VL-3B-Instruct"  # 或使用本地路径 "./models/Qwen2.5-VL-7B-Instruct"
data_path = "./data/dataset.jsonl"
output_dir = "outputs/qwen25vl-herb-lora"
# exist_ok=True: rerunning (or resuming) the script must not crash with
# FileExistsError when the output directory already exists.
os.makedirs(output_dir, exist_ok=True)

# Device selection (CPU is forced here; uncomment the next line to auto-select GPU)
# device = "cuda" if torch.cuda.is_available() else "cpu"
device = "cpu"
print(f"✅ 1. 🚀 使用设备: {device}")

# -------------------------------
# 2. Load processor and model
# -------------------------------
processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)

# Full-precision weights: float16 on CPU is slow and numerically fragile,
# so float32 is used and the model is mapped explicitly onto the CPU.
model = AutoModelForVision2Seq.from_pretrained(
    model_id,
    trust_remote_code=True,
    torch_dtype=torch.float32,
    device_map=device,
    low_cpu_mem_usage=True,  # stream weights in to keep peak RAM low
)
print(f"✅ 2. 加载 Processor 和 Model")

# -------------------------------
# 3. LoRA configuration (parameter-efficient fine-tuning)
# -------------------------------
lora_config = LoraConfig(
    task_type="CAUSAL_LM",
    r=16,                  # adapter rank
    lora_alpha=32,         # adapter scaling factor
    lora_dropout=0.05,
    bias="none",
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],  # attention projections
)

# Wrap the base model so only the injected LoRA weights are trainable.
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()  # report trainable vs. total parameter counts
print(f"✅ 3. LoRA 配置（参数高效微调）")

# -------------------------------
# 4. 加载数据集
# -------------------------------
# def load_and_preprocess(examples):
#     messages_list = []
#     for img_path, text in zip(examples["image"], examples["text"]):
#         try:
#             # 构造 message 格式
#             message = [
#                 {
#                     "role": "user",
#                     "content": [
#                         {"type": "image", "image": img_path},
#                         {"type": "text", "text": text}
#                     ]
#                 }
#             ]
#             messages_list.append(message)
#         except Exception as e:
#             print(f"⚠️ 跳过图像 {img_path}: {e}")
#
#     # 使用 apply_chat_template 生成包含 <image> 的提示
#     prompts = [processor.apply_chat_template(messages, tokenize=False) for messages in messages_list]
#     # 加载并预处理图像
#     images = [Image.open(msg[0]['content'][0]['image']).convert("RGB") for msg in messages_list]
#
#     # 使用 processor 处理多模态输入
#     inputs = processor(
#         images=images,
#         text=prompts,
#         return_tensors="pt",
#         padding=True,
#         truncation=True,
#         # max_length=256
#     )
#
#     # 添加 labels 用于计算 loss
#     # 在训练时，labels 通常与 input_ids 相同（自回归训练）
#     inputs["labels"] = inputs["input_ids"].clone()
#
#     return inputs
def load_and_preprocess(examples):
    """Convert a batch of ``{"image": [...], "text": [...]}`` examples into model inputs.

    For each example a single-turn chat message (image + text in one user turn)
    is built, rendered to a prompt via the global ``processor``'s chat template,
    and combined with the loaded image into a padded tensor batch. ``labels``
    are added for autoregressive training, with padding positions masked to
    -100 so they are ignored by the loss.

    Images that fail to open are skipped with a warning; returns ``{}`` when
    no example in the batch could be processed.
    """
    messages_list = []
    for img_path, text in zip(examples["image"], examples["text"]):
        # Qwen-VL chat format: one user turn holding the image plus the text.
        messages_list.append([
            {
                "role": "user",
                "content": [
                    {"type": "image", "image": img_path},
                    {"type": "text", "text": text},
                ],
            }
        ])

    if not messages_list:
        # Nothing to process in this batch.
        return {}

    try:
        # Render each message list into a prompt string containing the
        # image placeholder token(s).
        prompts = [
            processor.apply_chat_template(messages, tokenize=False)
            for messages in messages_list
        ]

        # Load images, dropping (image, prompt) pairs whose file cannot be read
        # so images and prompts stay aligned.
        images = []
        valid_prompts = []
        for prompt, msg in zip(prompts, messages_list):
            img_path = msg[0]['content'][0]['image']
            try:
                images.append(Image.open(img_path).convert("RGB"))
                valid_prompts.append(prompt)
            except Exception as e:
                print(f"⚠️ 加载图像失败: {img_path}: {e}")

        if not images:
            return {}

        # Tokenize text and preprocess images into one padded batch.
        inputs = processor(
            images=images,
            text=valid_prompts,
            return_tensors="pt",
            padding=True,
            truncation=True,
        )

        # Autoregressive training: labels mirror input_ids, but padding
        # positions are masked with -100 (the standard HF ignore index) so
        # they do not contribute to the loss.
        # NOTE(review): assumes the processor returns an "attention_mask" —
        # standard for padded HF processors; confirm for this model.
        labels = inputs["input_ids"].clone()
        labels[inputs["attention_mask"] == 0] = -100
        inputs["labels"] = labels

        return inputs
    except Exception as e:
        # Best-effort batch: report and let the caller skip this batch.
        print(f"⚠️ 数据处理错误: {e}")
        return {}


# Read the JSON-Lines dataset with `datasets.load_dataset`; each record is
# expected to provide an "image" (file path) and a "text" field — the
# collator below reads both.
dataset = load_dataset("json", data_files=data_path, split="train")
print(f"✅ 4. 加载数据集")

# -------------------------------
# 5. Data collator
# -------------------------------
class VLDataCollator:
    """Turn a list of raw dataset rows into a processed model-input batch.

    Defers all real work (chat templating, image loading, tokenization)
    to ``load_and_preprocess``.
    """

    def __call__(self, features):
        images = [row["image"] for row in features]
        texts = [row["text"] for row in features]
        return load_and_preprocess({"image": images, "text": texts})
print(f"✅ 5. 数据整理器（Collator）")

# -------------------------------
# 6. Training hyperparameters
# -------------------------------
_training_kwargs = dict(
    output_dir=output_dir,
    per_device_train_batch_size=1,   # a CPU run can only afford batch size 1
    gradient_accumulation_steps=4,   # accumulate gradients to emulate a larger batch
    learning_rate=1e-4,              # slightly higher LR, typical for LoRA adapters
    # num_train_epochs=3,
    save_steps=30,
    logging_steps=10,
    save_total_limit=2,
    remove_unused_columns=False,     # keep raw "image"/"text" columns for the collator
    report_to="none",
    warmup_ratio=0.1,
    optim="adamw_torch",
    logging_dir="logs",
    # evaluation_strategy="no",      # older transformers versions use this name
    # eval_strategy="no",            # newer transformers versions use this one
)
training_args = TrainingArguments(**_training_kwargs)
print(f"✅ 6. 设置训练参数")

# -------------------------------
# 7. Build the Trainer
# -------------------------------
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=dataset,           # raw rows; the collator does all preprocessing
    data_collator=VLDataCollator(),
)
print(f"✅ 7. 创建 Trainer")

# -------------------------------
# 8. Run training
# -------------------------------
print("🔥 开始微调...")
model.train()  # ensure train mode (Trainer normally sets this as well)
trainer.train()

# -------------------------------
# 9. Save the final model
# -------------------------------
print("💾 保存 LoRA 适配器...")
model.save_pretrained(output_dir)      # PEFT model: saves the LoRA adapter weights
processor.save_pretrained(output_dir)  # save the processor alongside for inference

print("🎉 微调完成！模型已保存到:", output_dir)