import sys
sys.path.append("/Users/yishanli/python/myProject/train-model")
import os 
import torch
from datasets import load_dataset
from transformers import AutoTokenizer,AutoModelForCausalLM
#os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'  # optional: route Hugging Face downloads through a mirror endpoint
# Path to the locally stored pretrained model (weights + tokenizer files).
MODEL_NAME ="/Users/yishanli/python/myProject/train-model/model"
# Directory where GRPO training artifacts will be written.
OUTPUT_DIR ="data/Qwen-GRPO-training"
# Create the output directory if it does not already exist.
os.makedirs(OUTPUT_DIR, exist_ok=True)
# Initialize the tokenizer with right-side padding (the convention for
# causal-LM training so labels align with the unpadded prefix).
# Fixed: removed a dead `tokenizer = AutoTokenizer` statement that bound the
# class object itself and was immediately overwritten by the call below.
tokenizer = AutoTokenizer.from_pretrained(
    MODEL_NAME,
    trust_remote_code=True,
    padding_side="right",
)
# If the tokenizer defines no pad token, fall back to the EOS token.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token


# Load the base causal-LM weights; bfloat16 halves memory versus fp32.
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME,trust_remote_code=True,torch_dtype=torch.bfloat16)
print(f"Model parameters: {model.num_parameters():,}")


# Probe for CUDA availability.
# NOTE(review): `device` is computed but the model is never moved to it in
# this file — confirm whether a `model.to(device)` happens downstream.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")

# System prompt instructing the model to wrap its reasoning in <think> tags
# and its final answer in <answer> tags.
# Fixed: the original fragments were concatenated without separating spaces
# (producing "aboutthe reasoning", "answerwithin<think>") and contained stray
# "deephub" watermark words that leaked into the runtime prompt text.
SYSTEM_PROMPT = (
    "A conversation between User and Assistant. The user asks a question, and the Assistant solves it. "
    "The assistant first thinks about the reasoning process in the mind and then provides the user with the answer. "
    "The reasoning process and answer are enclosed within <think> </think> and <answer> </answer> tags, respectively, "
    "i.e., <think> reasoning process here </think> <answer> answer here </answer>"
)
def make_conversation(example):
    """Turn a dataset row into a chat-style prompt.

    Builds a two-turn conversation: the fixed system instruction followed
    by the row's "problem" text as the user message.
    """
    system_turn = {"role": "system", "content": SYSTEM_PROMPT}
    user_turn = {"role": "user", "content": example["problem"]}
    return {"prompt": [system_turn, user_turn]}
def load_math_dataset(data_dir='/Users/yishanli/python/myProject/train-model/train'):
    """Load the local math dataset and convert rows to chat prompts.

    Args:
        data_dir: path of the local dataset directory. Defaults to the
            original hard-coded location, so existing callers are unaffected.

    Returns:
        dict with 'train' and 'test' splits; each split has a "prompt"
        column added by ``make_conversation`` and any leftover "messages"
        column removed.
    """
    # Requesting both splits at once returns them as a list in order.
    train_split, test_split = load_dataset(
        data_dir,
        name="default",
        split=['train', 'test'],
    )
    dataset = {'train': train_split, 'test': test_split}

    for split_name in dataset:
        dataset[split_name] = dataset[split_name].map(make_conversation)
        # Drop the raw "messages" column if present; only "prompt" is used.
        if "messages" in dataset[split_name].column_names:
            dataset[split_name] = dataset[split_name].remove_columns("messages")
    return dataset
    
# Build the dataset once at import time and report the split sizes.
dataset = load_math_dataset()
print(f"Train set size:{len(dataset['train'])}")
print(f"Test set size:{len(dataset['test'])}")

def validate_dataset(dataset):
    """Run lightweight sanity checks on the train/test splits.

    For each split, verifies that the required columns exist and that the
    first example's prompt starts with a system turn followed by a user
    turn. Results are printed; nothing is returned.
    """
    required_fields = ["problem", "prompt"]
    for split in ['train', 'test']:
        print(f"\nValidating {split} split:")

        # Column check: report any required field absent from this split.
        columns = dataset[split].column_names
        missing = [name for name in required_fields if name not in columns]
        if missing:
            print(f"Warning:Missing fields:{missing}")
        else:
            print("✅ All required fields present")

        # Structure check on the first example's prompt turns.
        prompt = dataset[split][0]['prompt']
        well_formed = (
            len(prompt) >= 2
            and prompt[0]['role'] == 'system'
            and prompt[1]['role'] == 'user'
        )
        if well_formed:
            print("✅ Prompt format is correct")
        else:
            print("Warning: Incorrect prompt format")
# Run the sanity checks on the freshly built dataset.
validate_dataset(dataset)
