import sys
sys.path.append("/Users/yishanli/python/myProject/train-model")
import os 
import torch
from datasets import load_dataset
from transformers import AutoTokenizer,AutoModelForCausalLM,TrainingArguments
from trl import GRPOConfig,GRPOTrainer

from method.ModelConfig import ModelConfig
from method.GRPOScriptArguments import GRPOScriptArguments
from method.GetRewardFunctions import get_reward_functions
from method.LoggingCallback import get_callbacks

# os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'  # optional Hugging Face mirror endpoint
MODEL_NAME = "/Users/yishanli/python/myProject/train-model/model"
OUTPUT_DIR = "data/Qwen-GRPO-training"

# Create the output directory if it does not already exist.
os.makedirs(OUTPUT_DIR, exist_ok=True)

# Initialize the tokenizer; right-side padding is the usual choice for
# causal-LM training. (The original also had a dead `tokenizer = AutoTokenizer`
# assignment that was immediately overwritten — removed.)
tokenizer = AutoTokenizer.from_pretrained(
    MODEL_NAME, trust_remote_code=True, padding_side="right"
)
# If no pad token is configured, fall back to the EOS token.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token


# Load the base causal-LM weights in bfloat16.
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME, trust_remote_code=True, torch_dtype=torch.bfloat16
)
print(f"Model parameters: {model.num_parameters():,}")


# Report CUDA availability.
# NOTE(review): `device` is only printed here; the Trainer handles actual
# device placement of the model — confirm this is intentional.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")

# System prompt instructing the model to wrap its output in
# <think>...</think> and <answer>...</answer> tags.
# NOTE: adjacent string literals are concatenated — the trailing spaces
# matter. The original was missing them ("aboutthe", "enclosedwithin")
# and contained a stray watermark token ("deephub") mid-sentence.
SYSTEM_PROMPT = (
    "A conversation between User and Assistant. The user asks a question, and the "
    "Assistant solves it. The assistant first thinks about the reasoning process in "
    "the mind and then provides the user with the answer. The reasoning process and "
    "answer are enclosed within <think> </think> and <answer> </answer> tags, "
    "respectively, i.e. <think> reasoning process here </think> "
    "<answer> answer here </answer>"
)


def make_conversation(example):
    """Convert a raw dataset row into a chat-style prompt.

    Args:
        example: mapping with a "problem" key holding the question text.

    Returns:
        dict with a "prompt" key: a list of [system message, user message].
    """
    return {
        "prompt": [
            {"role": "system", "content": SYSTEM_PROMPT},
            {"role": "user", "content": example["problem"]},
        ],
    }
def load_math_dataset():
    """Load the local math dataset and convert every row to a chat prompt.

    Returns:
        dict mapping 'train'/'test' to the converted dataset splits.
    """
    data_dir = '/Users/yishanli/python/myProject/train-model/train'
    train_split, test_split = load_dataset(
        data_dir,
        name="default",
        split=['train', 'test'],
    )
    splits = {'train': train_split, 'test': test_split}

    for name in splits:
        converted = splits[name].map(make_conversation)
        # Drop any pre-existing "messages" column; "prompt" supersedes it.
        if "messages" in converted.column_names:
            converted = converted.remove_columns("messages")
        splits[name] = converted
    return splits
    
# Load both splits and report their sizes.
dataset = load_math_dataset()
train_size = len(dataset['train'])
test_size = len(dataset['test'])
print(f"Train set size:{train_size}")
print(f"Test set size:{test_size}")

def validate_dataset(dataset):
    """Sanity-check every split of *dataset*.

    For each split, verify that the required columns are present and that the
    first sample's "prompt" starts with a system message followed by a user
    message. Findings are reported via print(); nothing is returned.

    Args:
        dataset: mapping of split name -> dataset object exposing a
            `column_names` attribute and integer indexing.
    """
    required_fields = ["problem", "prompt"]
    # Generalized: iterate whatever splits the caller provides instead of
    # hard-coding ['train', 'test'] (backward compatible for the usual dict).
    for split in dataset:
        print(f"\nValidating {split} split:")
        fields = dataset[split].column_names
        missing = [field for field in required_fields if field not in fields]
        if missing:
            print(f"Warning:Missing fields:{missing}")
        else:
            print("✅ All required fields present")
        # Spot-check the structure of the first sample's prompt.
        sample = dataset[split][0]
        message = sample['prompt']
        if (len(message) >= 2 and
                message[0]['role'] == 'system' and
                message[1]['role'] == 'user'):
            print("✅ Prompt format is correct")
        else:
            print("Warning: Incorrect prompt format")
# Sanity-check the freshly loaded splits before training starts.
validate_dataset(dataset)

training_args = TrainingArguments(
    output_dir=OUTPUT_DIR,          # directory for checkpoints and logs
    overwrite_output_dir=True,
    num_train_epochs=1,             # total number of training epochs
    per_device_train_batch_size=8,  # training batch size per device
    per_device_eval_batch_size=16,   # evaluation batch size per device
    gradient_accumulation_steps=2,  # gradient accumulation steps, simulates a larger effective batch
    learning_rate=5e-5,            # initial learning rate for the AdamW optimizer
    warmup_ratio=0.1,              # fraction of total steps used for LR warmup
    weight_decay=0.01,             # weight decay for all layers except biases and LayerNorm weights
    #logging_steps=10,              # logging frequency (in steps)
    eval_strategy="steps",         # evaluation strategy: evaluate every `eval_steps` steps
    eval_steps=50,                 # evaluation frequency (steps)
    save_strategy="steps",         # checkpoint strategy: save every `save_steps` steps
    save_steps=50,                 # checkpoint frequency (steps)
    #save_total_limit=2,            # max checkpoints to keep; older ones are deleted
    dataloader_num_workers=0,      # number of dataloader worker processes
    seed=42,                       # random seed for reproducibility
    bf16=False,                     # BF16 mixed-precision training disabled (model weights are loaded in bfloat16 separately)
    push_to_hub=False,             # do not push the model to the Hugging Face Hub
    gradient_checkpointing=True,   # enable gradient checkpointing to reduce memory usage
    report_to="none",              # disable experiment-tracking integrations
)

# Assemble GRPO-specific arguments, reward functions, and callbacks.
script_args = GRPOScriptArguments()
model_args = ModelConfig()
reward_functions = get_reward_functions(script_args)
callbacks = get_callbacks(training_args, model_args, script_args)

# Build the GRPO config from the TrainingArguments values.
# (The original additionally spread an empty dict here — dead code, removed.)
# NOTE(review): GRPOConfig may not accept every key emitted by
# TrainingArguments.to_dict(); confirm against the installed trl version.
grpo_config = GRPOConfig(**training_args.to_dict())

grpo_trainer = GRPOTrainer(
    model=model,
    reward_funcs=reward_functions,
    args=grpo_config,
    train_dataset=dataset['train'],
    eval_dataset=dataset['test'],
    callbacks=callbacks,
)

# Kick off training.
train_result = grpo_trainer.train()