Chydfile committed
Commit: 33a98c4
Parent(s): c942331
First model version_009
Browse files
- 1.json +0 -1
- merges.txt +0 -0
- pytorch_model.bin +1 -1
- special_tokens_map.json +1 -1
- tokenizer_config.json +1 -1
- vocab.json +0 -0
1.json DELETED
@@ -1 +0,0 @@
-{"errors": "replace", "unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": false, "special_tokens_map_file": "./model_v5_pytorch_rus_books_games/special_tokens_map.json", "tokenizer_file": null, "name_or_path": "./model_v5_pytorch_rus_books_games/", "tokenizer_class": "GPT2Tokenizer"}
merges.txt ADDED
The diff for this file is too large to render. See raw diff.
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:8bebba99b28df3a4359124d47fd169ec2ac9217287c61ce7a36b1ec1e4fbd42b
 size 551300711
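The weights are tracked with Git LFS, so the repository stores only this three-line pointer: the spec version, the sha256 oid of the blob, and its size in bytes. Below is a minimal sketch of verifying a downloaded pytorch_model.bin against the oid and size from this commit; the local file path is an assumption.

import hashlib
import os

# Values taken from the LFS pointer in this commit.
EXPECTED_OID = "8bebba99b28df3a4359124d47fd169ec2ac9217287c61ce7a36b1ec1e4fbd42b"
EXPECTED_SIZE = 551300711

path = "pytorch_model.bin"  # assumed path to the locally downloaded blob

# The size check is cheap, so do it first.
assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"

# Stream the file through sha256 to avoid loading ~550 MB at once.
h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert h.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("pytorch_model.bin matches its LFS pointer")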
special_tokens_map.json CHANGED
@@ -1 +1 @@
-{"bos_token": "<|endoftext|>", "eos_token": "<|endoftext|>", "unk_token": "<|endoftext|>"}
+{"bos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}}
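This change replaces bare-string special tokens with serialized AddedToken objects, whose flags (single_word, lstrip, rstrip, normalized) control how the token is matched and how adjacent whitespace is handled. transformers loads both forms interchangeably; a quick sketch, assuming a local checkout of this repo at ./, to confirm what the tokenizer picks up:

from transformers import GPT2Tokenizer

# Assumed: the repo files (vocab.json, merges.txt, the two JSON
# configs) have been cloned into the current directory.
tok = GPT2Tokenizer.from_pretrained("./")

# All three special tokens resolve to the same <|endoftext|> string.
print(tok.bos_token, tok.eos_token, tok.unk_token)
print(tok.special_tokens_map)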
tokenizer_config.json CHANGED
@@ -1 +1 @@
-{"errors": "replace", "unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": false, "special_tokens_map_file": null, "tokenizer_file": null, "name_or_path": "sberbank-ai/
+{"errors": "replace", "unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": false, "special_tokens_map_file": null, "tokenizer_file": null, "name_or_path": "sberbank-ai/rugpt2large", "tokenizer_class": "GPT2Tokenizer"}
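The new tokenizer_config.json fills in name_or_path and pins tokenizer_class to GPT2Tokenizer, which lets AutoTokenizer choose the right implementation for the vocab.json/merges.txt pair in this commit. A sketch under the same local-checkout assumption:

from transformers import AutoTokenizer

# "tokenizer_class": "GPT2Tokenizer" in tokenizer_config.json steers
# AutoTokenizer to the byte-level BPE implementation.
tok = AutoTokenizer.from_pretrained("./")  # assumed local checkout

print(type(tok).__name__)          # GPT2Tokenizer (or its fast variant)
print(tok.encode("<|endoftext|>")) # the special token maps to a single id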
vocab.json CHANGED
The diff for this file is too large to render. See raw diff.