Trailing space breaks tokenization
Browse files
README.md
CHANGED
@@ -69,7 +69,7 @@ Quickly get inference running with the following:
|
|
69 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
70 |
olmo = AutoModelForCausalLM.from_pretrained("allenai/OLMo-1B-hf")
|
71 |
tokenizer = AutoTokenizer.from_pretrained("allenai/OLMo-1B-hf")
|
72 |
-
message = ["Language modeling is "]
|
73 |
inputs = tokenizer(message, return_tensors='pt', return_token_type_ids=False)
|
74 |
# optional verifying cuda
|
75 |
# inputs = {k: v.to('cuda') for k,v in inputs.items()}
|
|
|
69 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
70 |
olmo = AutoModelForCausalLM.from_pretrained("allenai/OLMo-1B-hf")
|
71 |
tokenizer = AutoTokenizer.from_pretrained("allenai/OLMo-1B-hf")
|
72 |
+
message = ["Language modeling is"]
|
73 |
inputs = tokenizer(message, return_tensors='pt', return_token_type_ids=False)
|
74 |
# optional verifying cuda
|
75 |
# inputs = {k: v.to('cuda') for k,v in inputs.items()}
|