import argparse
from unsloth import FastLanguageModel, PatchFastRL, is_bfloat16_supported
from trl import GRPOConfig, GRPOTrainer
from reward_func_setup import get_reward_func
from data import get_gsm8k_dataset


# Command-line interface for the training script.
parser = argparse.ArgumentParser(description="GRPO fine-tune a model on GSM8K with unsloth")

# Both paths are used unconditionally below; mark them required so a missing
# argument fails fast at parse time instead of surfacing later as a confusing
# `None` passed to the model/dataset loaders.
parser.add_argument("-dp", "--dataset_path", type=str, required=True, help="dataset path")
parser.add_argument("-mp", "--model_path", type=str, required=True, help="model_path")

args = parser.parse_args()


# Maximum total sequence length (prompt + completion) for generation.
max_seq_length = 1024

# LoRA rank: typically one of 8, 16, 32, 64, 128. A larger rank gives the
# adapter more capacity ("smarter" model) at the cost of slower training.
lora_rank = 64

# Patch unsloth's fast RL path for GRPO *before* the model is loaded.
# NOTE(review): `PatchFastRL` was imported at the top of the file but never
# called; the unsloth GRPO workflow calls it prior to `from_pretrained` —
# confirm against the installed unsloth version.
PatchFastRL("GRPO", FastLanguageModel)

# Load the base model and tokenizer.
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name=args.model_path,
    max_seq_length=max_seq_length,
    load_in_4bit=True,            # 4-bit quantization to reduce GPU memory
    fast_inference=True,          # speed up generation during rollouts
    max_lora_rank=lora_rank,
    gpu_memory_utilization=0.5,   # fraction of GPU memory the inference engine may reserve
)

# Linear layers that receive LoRA adapters: all attention projections plus
# the MLP projections.
lora_target_layers = [
    "q_proj", "k_proj", "v_proj", "o_proj",   # attention
    "gate_proj", "up_proj", "down_proj",      # MLP
]

model = FastLanguageModel.get_peft_model(
    model,
    r=lora_rank,
    target_modules=lora_target_layers,
    lora_alpha=lora_rank,                   # alpha tied to rank
    use_gradient_checkpointing="unsloth",   # enables long-context fine-tuning
    random_state=3407,                      # fixed seed for reproducibility
)

# Token budget for the prompt; the remainder of max_seq_length is left for
# the generated completion.
max_prompt_length = 256

training_args = GRPOConfig(
    learning_rate=5e-6,
    adam_beta1=0.9,
    adam_beta2=0.99,
    weight_decay=0.1,
    warmup_ratio=0.1,
    lr_scheduler_type="cosine",
    optim="paged_adamw_8bit",
    logging_steps=1,
    # Pick mixed-precision mode from the hardware. `is_bfloat16_supported`
    # was imported at the top of the file but previously unused, leaving
    # precision at the framework default.
    bf16=is_bfloat16_supported(),
    fp16=not is_bfloat16_supported(),
    per_device_train_batch_size=1,
    gradient_accumulation_steps=1,  # Increase to 4 for smoother training
    num_generations=6,  # Decrease if out of memory
    max_prompt_length=max_prompt_length,
    max_completion_length=max_seq_length - max_prompt_length,
    # num_train_epochs = 1, # Set to 1 for a full training run
    max_steps=250,
    save_steps=250,
    max_grad_norm=0.1,
    report_to="none",  # Can use Weights & Biases
    output_dir="outputs_QWen2.5-7B-vllm",
    # use_vllm = False
)

# Build the GRPO trainer: reward functions score sampled completions and the
# LoRA-adapted policy is optimized against those scores.
reward_functions = get_reward_func()
train_dataset = get_gsm8k_dataset(args.dataset_path)

trainer = GRPOTrainer(
    model=model,
    processing_class=tokenizer,
    reward_funcs=reward_functions,
    args=training_args,
    train_dataset=train_dataset,
)

# Kick off training.
trainer.train()
