Text Generation
Transformers
PyTorch
English
gpt_neox
causal-lm
Inference Endpoints
text-generation-inference
jon-tow committed on
Commit
a917926
1 Parent(s): 64fb8c1

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +1 -1
README.md CHANGED
@@ -28,7 +28,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
28
 
29
  tokenizer = AutoTokenizer.from_pretrained("StabilityAI/stablelm-tuned-alpha-7b")
30
  model = AutoModelForCausalLM.from_pretrained("StabilityAI/stablelm-tuned-alpha-7b")
31
- model.half().cuda()
32
 
33
  inputs = tokenizer("What's your mood today?", return_tensors="pt")
34
  tokens = model.generate(
 
28
 
29
  tokenizer = AutoTokenizer.from_pretrained("StabilityAI/stablelm-tuned-alpha-7b")
30
  model = AutoModelForCausalLM.from_pretrained("StabilityAI/stablelm-tuned-alpha-7b")
31
+ model.half()
32
 
33
  inputs = tokenizer("What's your mood today?", return_tensors="pt")
34
  tokens = model.generate(