# -*- coding: utf-8 -*-
# time: 2025/4/21 11:31
# file: disill_load_model_ts.py
# author: hanson
import torch
from transformers import AutoModel, AutoTokenizer
#device = -1  # force CPU (legacy transformers pipeline-style device id)
# Prefer GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Load the distilled student model and its tokenizer from the local
# "student_model_hf" directory (saved earlier with save_pretrained).
# NOTE(review): AutoModel loads the bare backbone; generation below assumes
# the saved checkpoint resolves to a model class that implements .generate()
# (e.g. a causal-LM head) — confirm, or use AutoModelForCausalLM.
loaded_student = AutoModel.from_pretrained("student_model_hf").to(device)
loaded_tokenizer = AutoTokenizer.from_pretrained("student_model_hf")

# 3. Sanity-check the reloaded model
# Text-generation smoke test
def generate_text(model, tokenizer, prompt, max_length=20):
    """Generate a text continuation of ``prompt`` with ``model``.

    Args:
        model: A model exposing ``generate(input_ids, max_length=...)``
            (e.g. a Hugging Face causal LM).
        tokenizer: Tokenizer with ``encode``/``decode`` matching the model.
        prompt: Input text to continue.
        max_length: Total length cap (prompt + generated tokens).

    Returns:
        The decoded output string with special tokens stripped.
    """
    # Derive the device from the model itself instead of relying on a
    # module-level global, so this helper works wherever it is imported.
    try:
        run_device = next(model.parameters()).device
    except (StopIteration, AttributeError):
        run_device = torch.device("cpu")
    input_ids = tokenizer.encode(prompt, return_tensors="pt").to(run_device)
    # Inference only: disable autograd to avoid building a gradient graph.
    with torch.no_grad():
        output = model.generate(input_ids, max_length=max_length)
    return tokenizer.decode(output[0], skip_special_tokens=True)

# Run one generation with the reloaded student model and print the result.
prompt = "今天的天气"
print("加载后的学生模型生成:", generate_text(loaded_student, loaded_tokenizer, prompt))
