import torch
from modelscope import AutoModelForCausalLM, AutoTokenizer

# NOTE(review): pytorch_lightning.Trainer appears unused in this chunk —
# verify against the rest of the file before removing.
from pytorch_lightning import Trainer
model_name = "Qwen/Qwen3-0.6B"

# load the tokenizer and the model
# device_map="auto" lets accelerate place the weights across available
# devices; bfloat16 halves memory vs fp32 while keeping fp32's range.
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.bfloat16,
    device_map="auto"
)



from trl import SFTTrainer, SFTConfig

# Supervised fine-tuning hyperparameters, gathered into one mapping so the
# whole configuration is visible in a single place before construction.
_sft_settings = {
    "dataset_text_field": "text",  # must match the actual field name in the dataset
    "per_device_train_batch_size": 2,
    "gradient_accumulation_steps": 4,  # effective batch size = 2 * 4 per device
    "max_steps": 100,
    "learning_rate": 2e-4,
    "warmup_steps": 10,
    "logging_steps": 20,
    "optim": "adamw_torch",
    "weight_decay": 0.01,
    "lr_scheduler_type": "linear",
    "seed": 666,
    "report_to": "none",  # disable external experiment trackers
}
train_args = SFTConfig(**_sft_settings)

import get_dataset
# Initialize the Trainer — the local get_dataset module supplies both the
# data collator and the prepared training dataset.
# NOTE(review): the tokenizer loaded above is never passed to SFTTrainer,
# which will then resolve one from the model itself — confirm intended.
trainer = SFTTrainer(
    model=model,
    data_collator=get_dataset.collator,
    train_dataset=get_dataset.train_dataset,  # the converted Dataset
    args=train_args,
)

# Run training; trainer.train() returns a TrainOutput (loss, metrics, steps).
trainer_stats = trainer.train()
print(trainer_stats)
# Persist the trained weights.
# NOTE(review): the filename says "peft", but no PEFT/LoRA adapters are
# applied in this chunk — this saves the FULL base-model state dict.
# Confirm whether adapter-only saving (e.g. via peft) was intended.
torch.save(model.state_dict(), "./cat_peft.pth")




