Upload tokenizer
- merges.txt +1 -1
- tokenizer_config.json +1 -1
- vocab.json +0 -0
merges.txt CHANGED
@@ -1,4 +1,4 @@
-#version: 0.2
+#version: 0.2 - Trained by `huggingface/tokenizers`
 Ġ t
 Ġ a
 h e
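For context, merges.txt lists the byte-level BPE merge rules in priority order (`Ġ` encodes a leading space), and the `#version` line is only a header comment, so this change is cosmetic. A minimal sketch of loading the uploaded vocab/merges pair directly with the `tokenizers` library; the local file paths are assumptions:

```python
from tokenizers import ByteLevelBPETokenizer

# Assumed paths: the vocab.json and merges.txt from this commit,
# downloaded to the current directory.
tokenizer = ByteLevelBPETokenizer("vocab.json", "merges.txt")

# Encoding applies the merge rules in order, e.g. "h e" -> "he".
print(tokenizer.encode("the").tokens)
```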
tokenizer_config.json CHANGED
@@ -19,7 +19,7 @@
 },
 "errors": "replace",
 "model_max_length": 1000000000000000019884624838656,
-"name_or_path": "
+"name_or_path": "analogy_models/opt-350m-analogy-epoch15-p/model",
 "pad_token": {
 "__type": "AddedToken",
 "content": "<pad>",
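The huge `model_max_length` value is the sentinel that transformers writes when no explicit maximum length is configured (`int(1e30)`, which prints as 1000000000000000019884624838656 after float rounding), and `pad_token` is registered as an `AddedToken`. A hedged sketch of loading this tokenizer from the Hub; the repo id below is a placeholder for wherever this commit actually lives:

```python
from transformers import AutoTokenizer

# Placeholder repo id; substitute the actual Hub repository for this commit.
tokenizer = AutoTokenizer.from_pretrained("your-username/opt-350m-analogy")

# The sentinel value above means "no max length was set for this tokenizer".
print(tokenizer.model_max_length)
print(tokenizer.pad_token)  # "<pad>"
```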
vocab.json CHANGED
The diff for this file is too large to render; see the raw diff.