w11wo committed
Commit d74ce70
1 Parent(s): 94e4296

pytorch model

config.json CHANGED
@@ -1,4 +1,5 @@
 {
+  "_name_or_path": "./",
   "architectures": [
     "RobertaForMaskedLM"
   ],
@@ -18,6 +19,7 @@
   "num_hidden_layers": 24,
   "pad_token_id": 1,
   "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
   "transformers_version": "4.9.0.dev0",
   "type_vocab_size": 1,
   "use_cache": true,
flax_model.msgpack CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b301146ffbe4ae877d1ab09fbdd0684b99c18551120d3c22c60d1f1dfce31fe0
-size 711588089
+oid sha256:db38f2c9c6fbd22b34aeac8405dcd2a3cc28520045e91ff6970302d6694cffaa
+size 1421662309
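
Note: the pointer roughly doubles in size (711588089 → 1421662309 bytes), consistent with the bf16-to-fp32 cast performed by flax_to_torch.py below, which grows each parameter from 2 to 4 bytes.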
flax_to_torch.py ADDED
@@ -0,0 +1,25 @@
+from transformers import FlaxRobertaForMaskedLM, RobertaForMaskedLM, AutoTokenizer
+import jax
+import jax.numpy as jnp
+
+
+def to_f32(t):
+    return jax.tree_map(
+        lambda x: x.astype(jnp.float32) if x.dtype == jnp.bfloat16 else x, t
+    )
+
+
+# load flax bf16 model
+model = FlaxRobertaForMaskedLM.from_pretrained("./")
+# convert to fp32 model
+model.params = to_f32(model.params)
+# save flax fp32 model
+model.save_pretrained("./")
+
+# convert flax fp32 model to pytorch
+model_pt = RobertaForMaskedLM.from_pretrained("./", from_flax=True)
+model_pt.save_pretrained("./")
+
+# save the tokenizer alongside the converted model
+tokenizer = AutoTokenizer.from_pretrained("./")
+tokenizer.save_pretrained("./")
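
A sanity check one might run after this script (a sketch, not part of the commit; it assumes the fp32 Flax and PyTorch weights both live in "./"):

import numpy as np
import torch
from transformers import AutoTokenizer, FlaxRobertaForMaskedLM, RobertaForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("./")
inputs = tokenizer("Hello <mask>.", return_tensors="np")

# run the same input through both backends
logits_fx = FlaxRobertaForMaskedLM.from_pretrained("./")(**inputs).logits
with torch.no_grad():
    logits_pt = RobertaForMaskedLM.from_pretrained("./")(
        **{k: torch.from_numpy(v) for k, v in inputs.items()}
    ).logits

# expect only tiny fp32 numerical differences between JAX and PyTorch
print(np.abs(np.asarray(logits_fx) - logits_pt.numpy()).max())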
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
nohup.out CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2fd23419e898ca51d3fde0021241dba0f6a51ef70fb7a1b35e5b3a8f80f8b8f0
-size 19723143
+oid sha256:c2462fd513ba9b6a8c043fc3a7a7bed1c49514a34106409226c715fddf4b5f90
+size 19725834
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d7fe5513a7308eb4a7dd8369b78ef18ee16cc98f612208827e2877a47e1696c9
+size 1421780139
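
Note: at 1421780139 bytes, pytorch_model.bin is nearly the same size as the fp32 flax_model.msgpack above (1421662309 bytes). Both work out to roughly 355M fp32 parameters (about 1.42 GB at 4 bytes each), in line with the 24-hidden-layer configuration in config.json.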
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": false}}
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+{"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "add_prefix_space": false, "errors": "replace", "sep_token": "</s>", "cls_token": "<s>", "pad_token": "<pad>", "mask_token": "<mask>", "special_tokens_map_file": null, "name_or_path": "./", "tokenizer_class": "RobertaTokenizer"}
vocab.json ADDED
The diff for this file is too large to render. See raw diff
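
With the tokenizer files (vocab.json, merges.txt, tokenizer.json, tokenizer_config.json, special_tokens_map.json) and the PyTorch checkpoint all in place, an end-to-end smoke test could look like this (a sketch, not part of the commit):

from transformers import pipeline

# loads pytorch_model.bin plus the tokenizer files added in this commit
fill_mask = pipeline("fill-mask", model="./")
for pred in fill_mask("Hello <mask>.")[:3]:
    print(pred["token_str"], round(pred["score"], 4))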