import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
# Use a GPU if one is available, otherwise fall back to the CPU
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# trust_remote_code=True is required because this repository ships its own modeling code
model = AutoModelForCausalLM.from_pretrained('simpx/noob', trust_remote_code=True)
model = model.to(device)
model.eval()
tokenizer = AutoTokenizer.from_pretrained('simpx/noob', trust_remote_code=True)
# Start from a single zero token as the unconditional context
context = torch.zeros((1, 1), dtype=torch.long, device=device)

# Generate 100 new tokens without tracking gradients
with torch.no_grad():
    output_ids = model.generate(context, max_new_tokens=100)[0].tolist()

output_text = tokenizer.decode(output_ids)
print(output_text)
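The snippet above generates unconditionally from a single zero token. To condition generation on a prompt, the tokenizer can encode text into input IDs first. The following is a minimal sketch, assuming the repository's custom generate accepts a multi-token context the same way it accepts the single-token one; the prompt string is illustrative only.

# Hypothetical prompt-conditioned variant; assumes the custom generate
# handles an arbitrary-length context (untested for this repository)
prompt = "Once upon a time"
input_ids = tokenizer(prompt, return_tensors='pt').input_ids.to(device)
with torch.no_grad():
    output_ids = model.generate(input_ids, max_new_tokens=100)[0].tolist()
print(tokenizer.decode(output_ids))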