import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
import logging

# Suppress non-error chatter from the transformers library (e.g. weight-init
# and generation-config warnings).
logging.getLogger("transformers").setLevel(logging.ERROR)

# Path to the locally stored Qwen2-0.5B checkpoint.
model_path = "/workspace/Qwen2-0.5B/"
print(f"Loading model from: {model_path}")

# Load tokenizer and model. fp16 halves weight memory; device_map="auto"
# lets accelerate place weights on the available GPU(s), falling back to CPU.
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    torch_dtype=torch.float16,
    device_map="auto",
)

# Input prompt (Chinese): asks for an introduction to the Institute of
# Computing Technology, Chinese Academy of Sciences — its research results,
# rankings, and current leadership.
prompt = "请介绍一下中国科学院计算技术研究所的情况，包括其科研成果和排名情况和现任领导。"
# FIX: was "n" + ... — missing backslash printed a literal 'n' instead of a newline.
print("\n" + "=" * 50 + "\nINPUT:")
print(prompt)

# Tokenize and move input tensors to the same device as the model weights.
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

# Sampled generation; temperature 0.7 trades determinism for variety.
output_sequences = model.generate(
    **inputs,
    max_new_tokens=1000,
    do_sample=True,
    temperature=0.7,
)

# generate() returns prompt + continuation; slice off the prompt tokens so
# only the newly generated text is decoded.
input_length = inputs.input_ids.shape[1]
response = tokenizer.decode(
    output_sequences[0][input_length:], skip_special_tokens=True
)

# FIX: was "n" + ... — same missing backslash as the INPUT banner above.
print("\n" + "=" * 50 + "\nOUTPUT:")
print(response)