import os
import re
import numpy as np
import torch
from datasets import load_dataset, load_from_disk
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import GRPOConfig, GRPOTrainer

# Environment configuration
os.environ["TOKENIZERS_PARALLELISM"] = "true"
# torch.set_float32_matmul_precision("high")  # recommended for PyTorch >= 2.0

# Monkey-patch to work around an error at training step 200:
# presumably torch_npu's GradScaler assumes _growth_tracker is a tensor and
# calls .item() unconditionally, which fails once it is a plain int — TODO confirm.
import torch_npu

# The NPU AMP gradient scaler class whose private accessor is patched below.
scaler_class = torch_npu.npu.amp.grad_scaler.GradScaler


def patched_get_growth_tracker(self):
    """Return the scaler's growth tracker as a plain ``int``.

    Handles both representations of ``_growth_tracker``: when it is already
    an ``int`` it is returned as-is, otherwise it is assumed to be a tensor
    and unwrapped with ``.item()``.
    """
    tracker = self._growth_tracker
    return tracker if isinstance(tracker, int) else tracker.item()

# Install the patched accessor so AMP grad scaling no longer crashes when the
# growth tracker is already an int (see patched_get_growth_tracker above).
scaler_class._get_growth_tracker = patched_get_growth_tracker


# ===== 1. Model and data paths =====
model_name_ = "Qwen/Qwen3-1.7B-Base"
# NOTE(review): "Qwen3-1.7B1-Base" looks like a typo for "Qwen3-1.7B-Base" — confirm intended output dir.
output_dir = "Qwen3-1.7B1-Base"
model_path = f"/home/ma-user/work/DownLoads/Models/{model_name_}"
# HF datasets cache directory for the raw download.
cache_path = "/home/ma-user/work/DownLoads/Dataset/open-r1/DAPO-Math-17k-Processed"
# On-disk location of the already-mapped (preprocessed) dataset.
processed_path = "/home/ma-user/work/DAPO-Math-17k-Processed-processed_dapo_math"


# ===== 2. Load & cache preprocessed data =====
def preprocess(example):
    """Map a raw dataset row to the fields GRPO training consumes.

    Keeps the prompt as-is and renames ``solution`` to ``references`` so the
    reward function receives it under that keyword.
    """
    field_map = {"prompt": "prompt", "references": "solution"}
    return {out_key: example[src_key] for out_key, src_key in field_map.items()}


# Preprocess once and persist to disk; subsequent runs load the cached copy.
if not os.path.exists(processed_path):
    raw_dataset = load_dataset("open-r1/DAPO-Math-17k-Processed", "all", cache_dir=cache_path)
    # Drop all original columns so only preprocess()'s output (prompt/references) remains.
    train_dataset = raw_dataset["train"].map(preprocess, remove_columns=raw_dataset["train"].column_names, num_proc=64)
    train_dataset.save_to_disk(processed_path)
else:
    train_dataset = load_from_disk(processed_path)

# ===== 3. Load model and tokenizer =====
# Load policy model in fp16, consistent with fp16=True in the training config below.
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True, torch_dtype=torch.float16)


# ===== 4. Reward function =====
@torch.inference_mode()
def reward_accuracy(completions, prompts=None, references=None, **kwargs):
    """Numeric-accuracy reward for GRPO.

    For each completion, extract the first number after an "Answer:"/"answer:"
    marker and compare it to the first number found in the corresponding
    reference solution.

    Args:
        completions: list[str] of model completions.
        prompts: unused; accepted for TRL's reward-function signature.
        references: list[str] of reference solutions, one per completion.
        **kwargs: extra dataset columns passed through by the trainer.

    Returns:
        list[float] of per-sample rewards:
        1.0 — extracted answer matches the reference (|diff| < 1e-4);
        -1.0 — both sides parse to a number but they differ;
        -2.0 — either side has no parseable (finite) number.
    """
    # Hoisted: compiled once per call instead of once per element.
    number_pattern = re.compile(r"[-+]?\d*\.?\d+(?:[eE][-+]?\d+)?")

    def answer_split(text):
        # Keying on the shared suffix matches both "Answer:" and "answer:".
        return text.split("nswer:")[-1] if "nswer:" in text else ""

    def extract_number(text, ispred):
        # Predictions are only searched after the answer marker; references whole.
        target = answer_split(text) if ispred else text
        match = number_pattern.search(target)
        return float(match.group()) if match else None

    # None (no number found) becomes NaN in a float32 array.
    preds = np.array([extract_number(c, True) for c in completions], dtype=np.float32)
    refs = np.array([extract_number(r, False) for r in references], dtype=np.float32)

    rewards = np.full_like(preds, -2.0)
    valid = ~(np.isnan(preds) | np.isnan(refs) | np.isinf(preds) | np.isinf(refs))
    rewards[valid] = np.where(np.abs(preds[valid] - refs[valid]) < 1e-4, 1.0, -1.0)
    # TRL expects reward functions to return list[float], not an ndarray.
    return rewards.tolist()


# ===== 5. Training configuration (includes FSDP + multi-device launch support) =====
# GRPO training configuration: FSDP full sharding with fp16 mixed precision.
training_args = GRPOConfig(
    temperature=1.2,  # sampling temperature for completion generation
    max_prompt_length=655,
    max_completion_length=1500,
    output_dir=output_dir,
    num_train_epochs=3,
    logging_steps=5,
    save_strategy="steps",
    save_steps=50,
    save_total_limit=3,  # keep only the 3 most recent checkpoints
    # NOTE(review): metric_for_best_model normally pairs with
    # load_best_model_at_end=True; without it these two settings may have no
    # effect on which checkpoint is kept — confirm intended.
    metric_for_best_model="train_reward",
    greater_is_better=True,
    report_to="swanlab",  # can be switched to "none" or "tensorboard"
    per_device_train_batch_size=2,
    gradient_accumulation_steps=8,
    fp16=True,
    bf16=False,
    fsdp="full_shard auto_wrap",
    fsdp_config={
        "mixed_precision": "fp16",
        "forward_prefetch": True,
        "use_orig_params": False,
        # "transformer_layer_cls_to_wrap": ["Qwen3DecoderLayer"],  # core layer class to auto-wrap — replace with your model's block name
        # NOTE(review): "use_cpu" is not a documented fsdp_config key
        # (CPU offload is usually configured via cpu offload options) — verify
        # this is honored and not silently ignored.
        "use_cpu": True,
    },
)

# Wire model, config, reward function, and dataset into the GRPO trainer.
trainer = GRPOTrainer(
    model=model,
    args=training_args,
    reward_funcs=reward_accuracy,
    train_dataset=train_dataset,
)

if __name__ == "__main__":
    # All setup above runs at import time; only the training loop is guarded.
    trainer.train()
