Text Generation
Transformers
Safetensors
English
olmo
Inference Endpoints
dirkgr committed on
Commit
49c6a9d
1 Parent(s): 8e99543

Trailing space breaks tokenization

Browse files
Files changed (1) hide show
  1. README.md +1 -1
README.md CHANGED
@@ -69,7 +69,7 @@ Quickly get inference running with the following:
69
  from transformers import AutoModelForCausalLM, AutoTokenizer
70
  olmo = AutoModelForCausalLM.from_pretrained("allenai/OLMo-1B-hf")
71
  tokenizer = AutoTokenizer.from_pretrained("allenai/OLMo-1B-hf")
72
- message = ["Language modeling is "]
73
  inputs = tokenizer(message, return_tensors='pt', return_token_type_ids=False)
74
  # optional verifying cuda
75
  # inputs = {k: v.to('cuda') for k,v in inputs.items()}
 
69
  from transformers import AutoModelForCausalLM, AutoTokenizer
70
  olmo = AutoModelForCausalLM.from_pretrained("allenai/OLMo-1B-hf")
71
  tokenizer = AutoTokenizer.from_pretrained("allenai/OLMo-1B-hf")
72
+ message = ["Language modeling is"]
73
  inputs = tokenizer(message, return_tensors='pt', return_token_type_ids=False)
74
  # optional verifying cuda
75
  # inputs = {k: v.to('cuda') for k,v in inputs.items()}