import os 
import torch
import transformers
from transformers import(
    AutoModelForCausalLM,
    AutoTokenizer,
    HfArgumentParser,
    TrainingArguments,
    set_seed,
    TrainerCallback,
    TrainerControl,
    TrainerState
)
from transformers import AutoTokenizer,AutoModelForCausalLM
from transformers.trainer_utils import get_last_checkpoint
import datasets
from datasets import load_dataset
from trl import ( 
    AutoModelForCausalLMWithValueHead,
    PPOConfig,
    PPOTrainer,
    GRPOTrainer,
    GRPOConfig,
    SFTTrainer
)
from latex2sympy2_extended import NormalizationConfig
from math_verify import LatexExtractionConfig, parse, verify
#os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'  # optional: route downloads through the HF mirror endpoint
# Path to the local base model and the directory for GRPO training outputs.
MODEL_NAME = "/Users/yishanli/python/myProject/train-model/model"
OUTPUT_DIR = "data/Qwen-GRPO-training"

# Create the output directory if it does not already exist.
os.makedirs(OUTPUT_DIR, exist_ok=True)

# Initialize the tokenizer. Right-side padding keeps prompts left-aligned for
# causal-LM generation. (The previous dead assignment `tokenizer = AutoTokenizer`
# — binding the class itself, immediately overwritten — has been removed.)
tokenizer = AutoTokenizer.from_pretrained(
    MODEL_NAME, trust_remote_code=True, padding_side="right"
)
# If no pad token is configured, fall back to the EOS token so padding works.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

print(f"Vocabulary size: {len(tokenizer)}")
print(f"Model max length: {tokenizer.model_max_length}")
print(f"Pad token: {tokenizer.pad_token}")
print(f"EOS token: {tokenizer.eos_token}")


# Pick the device first so the model can be placed on it at load time.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the base causal LM in bfloat16 and move it onto `device`.
# NOTE: previously the model was left on CPU while inference inputs were sent
# to `device`, which raises a device-mismatch error whenever CUDA is available.
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME, trust_remote_code=True, torch_dtype=torch.bfloat16
).to(device)
print(f"Model parameters: {model.num_parameters():,}")
print(f"Using device: {device}")

def test_model_inference(user_input: str, max_new_tokens: int = 100,
                         temperature: float = 0.7) -> str:
    """Run one chat-style generation as a smoke test and return the reply.

    Uses the module-level ``tokenizer``, ``model`` and ``device``.

    Args:
        user_input: The user's message text.
        max_new_tokens: Generation budget (defaults to the original 100).
        temperature: Sampling temperature (defaults to the original 0.7).

    Returns:
        The decoded assistant reply only. The prompt tokens are sliced off
        before decoding — previously the full sequence (including the
        rendered chat template) was returned.
    """
    messages = [
        {"role": "system", "content": "You are Qwen, a helpful assistant."},
        {"role": "user", "content": user_input},
    ]
    # Render the conversation with the model's chat template, leaving the
    # assistant turn open so the model generates the reply.
    text = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    inputs = tokenizer(text, return_tensors="pt").to(device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        temperature=temperature,
    )
    # Decode only the newly generated tokens (skip the prompt prefix).
    prompt_length = inputs["input_ids"].shape[1]
    response = tokenizer.decode(outputs[0][prompt_length:], skip_special_tokens=True)
    return response
# Guard the smoke test so importing this module does not trigger inference.
if __name__ == "__main__":
    test_input = "how are you?"
    response = test_model_inference(test_input)
    print(f"Test Input:{test_input}")
    print(f"Model Response:{response}")

