import json
import os
import re
from argparse import ArgumentParser
from typing import Any, Dict, List, Optional

import torch
from datasets import load_dataset
from peft import LoraConfig
from qwen_vl_utils import fetch_image
from transformers import AutoProcessor, Qwen2_5_VLForConditionalGeneration
from trl import GRPOConfig, GRPOTrainer

from utils.universal import promptTemplates, set_seed

# ======= Environment & reproducibility =======
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
set_seed(42)

# ======= Command-line arguments =======
parser = ArgumentParser()
parser.add_argument(
    "--model_path",
    type=str,
    default="../Downloads/Models/Qwen/Qwen2.5-VL-3B-Instruct",
)
parser.add_argument(
    "--loss_type",
    type=str,
    default="grpo",
    choices=["dapo", "grpo", "dr_grpo"],
)
parser.add_argument("--output_dir", type=str, default="output")

args = parser.parse_args()
print(args)

# Dataset locations: image root directory and the training annotation file.
image_root = "../datas/VisuRiddles"
train_json_path = "../datas/VisuRiddles/train_dataset.json"


# ======= Model & processor =======
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    args.model_path,
    trust_remote_code=True,
    torch_dtype=torch.bfloat16,
)
# use_fast=False avoids the fast image-processor warning.
processor = AutoProcessor.from_pretrained(args.model_path, trust_remote_code=True, use_fast=False)

# Pad-token fallback (kept consistent with the SFT script): reuse EOS when
# the tokenizer ships without a dedicated pad token.
tok = processor.tokenizer
if tok.pad_token_id is None:
    tok.pad_token_id = tok.eos_token_id
    processor.pad_token_id = tok.pad_token_id
    print("Set pad_token_id to eos_token_id:", tok.pad_token_id)


# ======= Data preprocessing (build prompt/image/metadata rows for GRPO) =======
def preprocess_to_rl(example: Dict[str, Any]) -> Dict[str, Any]:
    """Convert one raw dataset record into a GRPO training row.

    Args:
        example: Raw record. Reads "imgs" (list of relative image paths —
            only the first is used), "question", "option", and optionally
            "gold_answer".

    Returns:
        Dict with the columns GRPOTrainer consumes:
        - "prompt": chat-template-rendered text prompt,
        - "image": the fetched image object,
        - "metadatas": {"gold_answer": ...} used by the reward function.
    """
    # Fix: the original annotation said `-> str`, but a dict is returned.
    image_path = os.path.join(image_root, example["imgs"][0])
    option = f"option: {example['option']}\n" if example["option"] != "" else ""
    question = example["question"] + option + promptTemplates["Naive"] + 'Write the answer into a JSON form\n```json\n{"answer": "X"}```'

    messages = [
        {
            "role": "user",
            "content": [
                {
                    "type": "image",
                    "image": image_path,
                    # Downscale to keep the vision-token count small.
                    "resized_height": 288,
                    "resized_width": 288,
                },
                {"type": "text", "text": question},
            ],
        },
    ]

    text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    image_inputs = fetch_image({"image": image_path})
    return {
        # Column names GRPOTrainer expects: prompt / image (it assembles the
        # multimodal model inputs itself).
        "prompt": text,
        "image": image_inputs,
        # gold_answer rides along in "metadatas" for the reward function.
        "metadatas": {"gold_answer": example.get("gold_answer", None)},
    }


# ======= Dataset loading =======
raw = load_dataset("json", data_files={"train": train_json_path})["train"]
rl_ds = raw.map(
    preprocess_to_rl,
    remove_columns=list(raw.column_names),
    num_proc=8,
)

# Simple train/val split (adjust the ratio as needed).
split = rl_ds.train_test_split(test_size=0.3, seed=42)
train_ds, eval_ds = split["train"], split["test"]

# ======= LoRA config (same target_modules as the SFT script) =======
lora_config = LoraConfig(
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
    init_lora_weights=True,
)

# ======= Reward function (text-only reward; TRL does not feed images to rewards) =======
"""
Reward strategy:
1) Parse the ```json {...}``` block out of the generated text and read its
   "answer" field.
2) Compare it against metadata gold_answer (case- and whitespace-insensitive).
3) Scoring (matches reward_fn below): unparseable/malformed output -> 0.0;
   valid JSON block with a wrong answer -> 1.0; valid JSON block with the
   correct answer -> 2.0.

Note: GRPOTrainer currently only supports text rewards — the image is never
passed to the reward function.
"""

# Matches a fenced ```json { ... }``` block; non-greedy, so only the first
# top-level object is captured (nested braces are not supported — the
# expected model output is a flat {"answer": "X"}).
answer_regex = re.compile(r"```json\s*(\{.*?\})\s*```", re.DOTALL)


def extract_answer_block(text: str) -> Optional[Dict[str, Any]]:
    """Parse the first ```json {...}``` block in *text*.

    Returns:
        The decoded dict, or None when *text* is not a string, contains no
        fenced JSON block, or the captured block is not valid JSON.
    """
    # Fix: annotated `-> dict` before, but None is a documented return value.
    if not isinstance(text, str):
        return None
    match = answer_regex.search(text)
    if match is None:
        return None
    try:
        return json.loads(match.group(1))
    # Narrowed from a bare `except Exception`: json.loads on a str can only
    # fail with JSONDecodeError here.
    except json.JSONDecodeError:
        return None


def reward_fn(completions: List[str], prompts: List[str], metadatas: List[Dict[str, Any]], **kwargs) -> List[float]:
    """Score each completion against its gold answer.

    Scoring:
        0.0 — output cannot be parsed into a ```json {...}``` block,
        1.0 — valid block but "answer" does not match gold_answer,
        2.0 — valid block and "answer" matches gold_answer
              (case- and whitespace-insensitive).

    Returns:
        One float reward per completion, in order.
    """
    rewards = []
    for out_item, meta in zip(completions, metadatas):
        # Normalize the completion to plain text. TRL may hand us either a
        # raw string or, in conversational format, a list of message dicts
        # ([{"role": ..., "content": ...}]).
        if isinstance(out_item, list) and len(out_item) > 0:
            first = out_item[0]
            if isinstance(first, dict):
                # Bug fix: previously the dict itself was passed onward,
                # which always parsed as None and scored 0.0.
                out = str(first.get("content", ""))
            else:
                out = first
        elif isinstance(out_item, str):
            out = out_item
        else:
            # Unprocessable item type -> 0 points.
            rewards.append(0.0)
            continue

        gold = str((meta or {}).get("gold_answer", "")).strip()
        parsed = extract_answer_block(out)

        if parsed is None:
            # Format error -> 0 points
            rewards.append(0.0)
            continue

        pred = str(parsed.get("answer", "")).strip()
        if pred.lower() == gold.lower():
            # Correct format + correct answer -> 2 points
            rewards.append(2.0)
        else:
            # Correct format + incorrect answer -> 1 point
            rewards.append(1.0)
    return rewards


# ======= GRPO configuration =======
"""
Multi-GPU: launch with torchrun (see the command at the end of the file);
FSDP/DeepSpeed are also options.
remove_unused_columns=False keeps the image column so the trainer can build
the multimodal inputs internally.
"""
# Training hyperparameters for GRPO. Code left as-is; comments translated.
grpo_config = GRPOConfig(
    loss_type=args.loss_type,
    output_dir=args.output_dir,
    per_device_train_batch_size=1,
    per_device_eval_batch_size=8,
    gradient_accumulation_steps=1,
    num_generations=8,  # sample 8 completions per prompt; NOTE(review): TRL requires the global train batch (processes * per_device_bs * grad_accum) to be divisible by this — confirm the launch setup
    top_k=20,
    num_train_epochs=3,
    learning_rate=5e-5,  # RL generally uses a smaller LR; tune as needed
    lr_scheduler_type="cosine",
    warmup_ratio=0.05,
    logging_steps=5,
    save_strategy="steps",
    save_steps=50,
    save_only_model=True,  # checkpoints keep model weights only (no optimizer state)
    eval_strategy="steps",
    eval_steps=50,
    report_to="swanlab",
    remove_unused_columns=False,  # keep the "image" column for internal multimodal assembly
    fp16=False,
    bf16=True,  # matches the bf16 dtype the model was loaded with above
    max_prompt_length=8192,
    max_completion_length=2048,
    fsdp="full_shard auto_wrap",
    # NOTE(review): several keys below (e.g. "use_cpu", "offload_params",
    # "enable_gradient_checkpointing") do not look like the accelerate/HF
    # fsdp_config key names — verify against the installed accelerate version.
    fsdp_config={
        "mixed_precision": "bf16",
        "forward_prefetch": True,
        "use_orig_params": False,
        "use_cpu": True,
        "offload_params": True,
        "offload_optimizer": True,
        "enable_gradient_checkpointing": True,
    },
)

# ======= Build the GRPOTrainer =======
trainer = GRPOTrainer(
    model=model,
    processing_class=processor,  # trainer builds the multimodal inputs from prompt+image internally
    args=grpo_config,
    train_dataset=train_ds,
    eval_dataset=eval_ds,
    reward_funcs=[reward_fn],  # multiple reward functions can be stacked here
    peft_config=lora_config,  # LoRA low-rank fine-tuning
)

# ======= Training entry point =======
if __name__ == "__main__":
    # Cast the trainable (LoRA) parameters to bf16 before training so they
    # match the bf16 base weights loaded above.
    from peft.peft_model import PeftModel

    if isinstance(trainer.model, PeftModel):
        trainable = (p for p in trainer.model.parameters() if p.requires_grad)
        for p in trainable:
            p.data = p.data.bfloat16()
        print("Successfully converted trainable LoRA parameters to bf16.")

    trainer.train()
    # Persist the LoRA weights when training finishes.
    trainer.save_model()
