Text Generation
Transformers
PyTorch
English
gpt_neox
causal-lm
Inference Endpoints
text-generation-inference
jon-tow committed on
Commit
53076a3
1 Parent(s): bd7f98d

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +2 -1
README.md CHANGED
@@ -28,8 +28,9 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
28
 
29
  tokenizer = AutoTokenizer.from_pretrained("StabilityAI/stablelm-tuned-alpha-7b")
30
  model = AutoModelForCausalLM.from_pretrained("StabilityAI/stablelm-tuned-alpha-7b")
 
31
 
32
- inputs = tokenizer("What's your mood today?", return_tensors="pt")
33
  tokens = model.generate(
34
  **inputs,
35
  max_new_tokens=64,
 
28
 
29
  tokenizer = AutoTokenizer.from_pretrained("StabilityAI/stablelm-tuned-alpha-7b")
30
  model = AutoModelForCausalLM.from_pretrained("StabilityAI/stablelm-tuned-alpha-7b")
31
+ model.half().cuda()
32
 
33
+ inputs = tokenizer("What's your mood today?", return_tensors="pt").to('cuda')
34
  tokens = model.generate(
35
  **inputs,
36
  max_new_tokens=64,