w11wo committed
Commit a932a01
1 Parent(s): 14cd070

pytorch model
added_tokens.json ADDED
@@ -0,0 +1 @@
+ {"<|endoftext|>": 50265}
config.json CHANGED
@@ -1,4 +1,5 @@
  {
+ "_name_or_path": "./",
  "activation_function": "gelu_new",
  "architectures": [
  "GPT2LMHeadModel"
@@ -30,6 +31,7 @@
  "max_length": 50
  }
  },
+ "torch_dtype": "float32",
  "transformers_version": "4.9.0.dev0",
  "use_cache": true,
  "vocab_size": 50257
flax_to_torch.py ADDED
@@ -0,0 +1,7 @@
+ from transformers import GPT2LMHeadModel, AutoTokenizer
+
+ model = GPT2LMHeadModel.from_pretrained("./", from_flax=True)
+ model.save_pretrained("./")
+
+ tokenizer = AutoTokenizer.from_pretrained("./")
+ tokenizer.save_pretrained("./")
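The script loads the Flax checkpoint with from_flax=True and re-saves it, which writes the pytorch_model.bin added in this commit. A quick smoke test of the converted weights, as a sketch run after the conversion (the prompt is an arbitrary placeholder):

from transformers import GPT2LMHeadModel, AutoTokenizer

# This now resolves to the freshly written pytorch_model.bin.
model = GPT2LMHeadModel.from_pretrained("./")
tokenizer = AutoTokenizer.from_pretrained("./")

inputs = tokenizer("Hello", return_tensors="pt")
outputs = model.generate(**inputs, max_length=20, do_sample=False)
print(tokenizer.decode(outputs[0]))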
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
nohup.out CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a52ab20f4d7b2cc559515131723200797a3e8103da786ab1e520fb1539148aae
- size 2123344
+ oid sha256:f63d8b41f1df6c2406fce88d0c5bfb81a80d67508dac7f1dbd57df30ab79dbde
+ size 2124081
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:80cb5b2f48df1468d6372543d79c48c0ae12218c704f40d294a0a45181a2085f
+ size 510401385
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<|endoftext|>", "eos_token": "<|endoftext|>", "unk_token": "<|endoftext|>"}
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "<|endoftext|>", "bos_token": "<|endoftext|>", "eos_token": "<|endoftext|>", "add_prefix_space": false, "special_tokens_map_file": null, "name_or_path": "./", "tokenizer_class": "GPT2Tokenizer"}
vocab.json ADDED
The diff for this file is too large to render. See raw diff