Hugging Face Spaces — status: Runtime error (reported by the Space UI).
"""Run a short CPU-only text-generation inference with the ThreatScope model.

Loads the tokenizer and model from the Hugging Face Hub, strips any
quantization settings from the config (so the checkpoint loads in full
precision on CPU, with no bitsandbytes/GPU dependency), then generates up
to 50 new tokens for a sample prompt and prints the decoded result.
"""
import torch
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer

model_id = "Ogero79/threatscope-cyberthreat-analyst"

# Load tokenizer
tokenizer = AutoTokenizer.from_pretrained(model_id)

# Load config and remove quantization settings. Deleting the attribute is
# safer than assigning None: some transformers versions still run
# quantization code paths whenever the attribute exists at all.
config = AutoConfig.from_pretrained(model_id)
if hasattr(config, "quantization_config"):
    delattr(config, "quantization_config")

# Load model on CPU without device_map or dtype tricks
model = AutoModelForCausalLM.from_pretrained(model_id, config=config)
model.eval()  # inference mode: disables dropout etc.

# Prepare prompt
prompt = "What is a cyber threat?"
inputs = tokenizer(prompt, return_tensors="pt")

# Run inference without tracking gradients (saves memory, faster on CPU)
with torch.no_grad():
    outputs = model.generate(**inputs, max_new_tokens=50)

# Show result
print(tokenizer.decode(outputs[0], skip_special_tokens=True))