import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Run on GPU when available, otherwise fall back to CPU.
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# trust_remote_code=True is needed because the repo ships custom model and tokenizer code.
model = AutoModelForCausalLM.from_pretrained('simpx/noob', trust_remote_code=True)
model = model.to(device)
model.eval()
tokenizer = AutoTokenizer.from_pretrained('simpx/noob', trust_remote_code=True)

# Start generation from a single token (id 0) as an unconditional context.
context = torch.zeros((1, 1), dtype=torch.long, device=device)

# Generate 100 new tokens and decode them back to text.
with torch.no_grad():
    output_ids = model.generate(context, max_new_tokens=100)[0].tolist()
    output_text = tokenizer.decode(output_ids)
    print(output_text)
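
To condition the model on a prompt rather than the empty context, the same pattern should work with an encoded prompt as the starting ids. This is a minimal sketch, assuming the repo's custom tokenizer exposes the standard encode method; the prompt string itself is purely illustrative:

# Prompted generation (a sketch): encode a prompt string and use it
# as the initial context instead of the single zero token.
prompt = "Hello"  # illustrative prompt, not from the original card
prompt_ids = torch.tensor([tokenizer.encode(prompt)], dtype=torch.long, device=device)

with torch.no_grad():
    output_ids = model.generate(prompt_ids, max_new_tokens=100)[0].tolist()
    print(tokenizer.decode(output_ids))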
Model size: 13.1M parameters (F32, Safetensors)