gorkemgoknar committed
Commit df3cec8
1 Parent(s): 624c809

V2 - perplexity 86, accuracy ~0.29 (epoch 0: train loss 4.610025, valid loss 4.462641, accuracy 0.289884, perplexity 86.716248, time 2:34:50)
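The trailing numbers read like a fastai-style training log row: epoch, train loss, valid loss, accuracy, perplexity, time. Under that reading (an assumption; the message itself leaves the columns unlabeled), the logged perplexity is just the exponential of the validation loss, which is easy to verify:

```python
import math

# Validation cross-entropy loss taken from the commit message above.
valid_loss = 4.462641

# Perplexity is exp(mean cross-entropy loss).
perplexity = math.exp(valid_loss)
print(f"{perplexity:.6f}")  # -> 86.716248, matching the logged value
```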

config.json CHANGED
@@ -1,5 +1,4 @@
 {
-  "_name_or_path": "gpt2",
   "activation_function": "gelu_new",
   "architectures": [
     "GPT2LMHeadModel"
merges.txt CHANGED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:23d82bf4125cb86da2b61ef248ffbf6daee583dcaaefa9bc6457c179a67306ba
-size 510406796
+oid sha256:bba84828153cb657ee2bd273a9b27587ca473a022d8eef52dc24514311f75390
+size 510406901
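pytorch_model.bin is tracked with Git LFS, so the diff only swaps the pointer file's sha256 oid and byte size; the actual weights live in LFS storage. A small sketch for checking a downloaded copy against the new pointer (the local filename is an assumption):

```python
import hashlib

# oid and size from the updated Git LFS pointer above.
EXPECTED_OID = "bba84828153cb657ee2bd273a9b27587ca473a022d8eef52dc24514311f75390"
EXPECTED_SIZE = 510406901

sha = hashlib.sha256()
size = 0
with open("pytorch_model.bin", "rb") as f:  # hypothetical local download
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha.update(chunk)
        size += len(chunk)

assert sha.hexdigest() == EXPECTED_OID, "sha256 mismatch"
assert size == EXPECTED_SIZE, "size mismatch"
```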
special_tokens_map.json CHANGED
@@ -1 +1 @@
-{"bos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "pad_token": "<|endoftext|>"}
+{"bos_token": "<|endoftext|>", "eos_token": "<|endoftext|>", "unk_token": "<|endoftext|>", "pad_token": "<|endoftext|>"}
tokenizer_config.json CHANGED
@@ -1 +1 @@
-{"unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": false, "pad_token": "<|endoftext|>", "special_tokens_map_file": null, "name_or_path": "/root/.fastai/data/trwiki/ByteLevelBPE_tokenizer_tr", "errors": "replace"}
+{"pad_token": "<|endoftext|>", "special_tokens_map_file": null, "full_tokenizer_file": null}
vocab.json CHANGED
The diff for this file is too large to render. See raw diff
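vocab.json and merges.txt are the vocabulary and merge rules of the ByteLevel BPE tokenizer referenced in the old tokenizer_config.json. A hedged sketch of how such a file pair is typically produced with the tokenizers library; the corpus file, vocab size, and output directory are assumptions, not taken from this commit:

```python
import os
from tokenizers import ByteLevelBPETokenizer

tokenizer = ByteLevelBPETokenizer()
tokenizer.train(
    files=["trwiki.txt"],              # assumed Turkish corpus file
    vocab_size=50257,                  # GPT-2's default size (assumption)
    special_tokens=["<|endoftext|>"],  # the special token used throughout this repo
)

# save_model() writes exactly the two files diffed here: vocab.json and merges.txt.
os.makedirs("ByteLevelBPE_tokenizer_tr", exist_ok=True)
tokenizer.save_model("ByteLevelBPE_tokenizer_tr")
```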