Text Generation
Transformers
Safetensors
English
olmo
Inference Endpoints
dirkgr committed on
Commit
838af31
1 Parent(s): e4a9740

Trailing space breaks tokenization

Browse files
Files changed (1) hide show
  1. README.md +1 -1
README.md CHANGED
@@ -69,7 +69,7 @@ Quickly get inference running with the following:
69
  from transformers import AutoModelForCausalLM, AutoTokenizer
70
  olmo = AutoModelForCausalLM.from_pretrained("allenai/OLMo-7B-Twin-2T-hf")
71
  tokenizer = AutoTokenizer.from_pretrained("allenai/OLMo-7B-Twin-2T-hf")
72
- message = ["Language modeling is "]
73
  inputs = tokenizer(message, return_tensors='pt', return_token_type_ids=False)
74
  # optional verifying cuda
75
  # inputs = {k: v.to('cuda') for k,v in inputs.items()}
 
69
  from transformers import AutoModelForCausalLM, AutoTokenizer
70
  olmo = AutoModelForCausalLM.from_pretrained("allenai/OLMo-7B-Twin-2T-hf")
71
  tokenizer = AutoTokenizer.from_pretrained("allenai/OLMo-7B-Twin-2T-hf")
72
+ message = ["Language modeling is"]
73
  inputs = tokenizer(message, return_tensors='pt', return_token_type_ids=False)
74
  # optional verifying cuda
75
  # inputs = {k: v.to('cuda') for k,v in inputs.items()}