Text Generation
Transformers
PyTorch
English
gpt_neox
causal-lm
Inference Endpoints
text-generation-inference
jon-tow committed on
Commit
f7257a9
1 Parent(s): 53076a3

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +1 -1
README.md CHANGED
@@ -30,7 +30,7 @@ tokenizer = AutoTokenizer.from_pretrained("StabilityAI/stablelm-tuned-alpha-7b")
30
  model = AutoModelForCausalLM.from_pretrained("StabilityAI/stablelm-tuned-alpha-7b")
31
  model.half().cuda()
32
 
33
- inputs = tokenizer("What's your mood today?", return_tensors="pt").to('cuda')
34
  tokens = model.generate(
35
  **inputs,
36
  max_new_tokens=64,
 
30
  model = AutoModelForCausalLM.from_pretrained("StabilityAI/stablelm-tuned-alpha-7b")
31
  model.half().cuda()
32
 
33
+ inputs = tokenizer("What's your mood today?", return_tensors="pt").to("cuda")
34
  tokens = model.generate(
35
  **inputs,
36
  max_new_tokens=64,