brunneis committed on
Commit c741ffd
1 Parent(s): dde9d7e

Improved supervisor with deepset/tinyroberta-6l-768d

config.json CHANGED
@@ -1,7 +1,7 @@
 {
-  "_name_or_path": "./ernie-autosave/xlmroberta/1714666085842",
+  "_name_or_path": "deepset/tinyroberta-6l-768d",
   "architectures": [
-    "XLMRobertaForSequenceClassification"
+    "RobertaForSequenceClassification"
   ],
   "attention_probs_dropout_prob": 0.1,
   "bos_token_id": 0,
@@ -16,15 +16,15 @@
   "language": "english",
   "layer_norm_eps": 1e-05,
   "max_position_embeddings": 514,
-  "model_type": "xlm-roberta",
-  "name": "XLMRoberta",
+  "model_type": "roberta",
+  "name": "Roberta",
   "num_attention_heads": 12,
-  "num_hidden_layers": 12,
-  "output_past": true,
+  "num_hidden_layers": 6,
   "pad_token_id": 1,
   "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
   "transformers_version": "4.24.0",
   "type_vocab_size": 1,
   "use_cache": true,
-  "vocab_size": 250002
+  "vocab_size": 50265
 }
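The config change swaps the 12-layer XLM-RoBERTa base for the distilled 6-layer deepset/tinyroberta-6l-768d, which also shrinks the vocabulary from 250,002 SentencePiece entries to RoBERTa's 50,265 byte-level BPE entries. A quick sketch to confirm the upstream model's shape against the values in this diff (assumes transformers is installed and the Hub is reachable):

from transformers import AutoConfig

# Fetch the upstream base model's config and check the fields
# this commit changed in the derived config.json.
cfg = AutoConfig.from_pretrained("deepset/tinyroberta-6l-768d")
print(cfg.model_type, cfg.num_hidden_layers, cfg.vocab_size)
# expected: roberta 6 50265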
merges.txt ADDED
The diff for this file is too large to render.
special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
+{
+  "bos_token": "<s>",
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "mask_token": {
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "unk_token": "<unk>"
+}
tf_model.h5 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:324b63541b17025ff3d54e116087638b4d561e9527e96ca10f08926599fb9464
-size 1112470336
+oid sha256:8260bd1e75816fb830aa02bed77289950e69ae35eab0524d6f9a697c28912087
+size 328629336
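tf_model.h5 is tracked with Git LFS, so the diff only shows the new object hash and byte size: the TensorFlow checkpoint drops from about 1.11 GB to about 329 MB, consistent with halving the hidden layers and moving from the 250k-entry XLM-R vocabulary to RoBERTa's 50k-entry one. A minimal loading sketch, assuming a local clone of this repo (the "./" path is illustrative):

from transformers import TFRobertaForSequenceClassification

# from_pretrained picks up tf_model.h5 from the local directory;
# run `git lfs pull` first so the file is real weights, not the pointer.
model = TFRobertaForSequenceClassification.from_pretrained("./")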
tokenizer.json ADDED
The diff for this file is too large to render.
tokenizer_config.json ADDED
@@ -0,0 +1,18 @@
+{
+  "add_prefix_space": false,
+  "bos_token": "<s>",
+  "cls_token": "<s>",
+  "do_lower_case": false,
+  "eos_token": "</s>",
+  "errors": "replace",
+  "mask_token": "<mask>",
+  "max_len": 512,
+  "model_max_length": 512,
+  "name_or_path": "deepset/tinyroberta-6l-768d",
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "special_tokens_map_file": null,
+  "tokenizer_class": "RobertaTokenizer",
+  "trim_offsets": true,
+  "unk_token": "<unk>"
+}
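Together with the merges.txt and vocab.json files added above, this replaces XLM-R's SentencePiece tokenizer with RoBERTa's byte-level BPE. A minimal sketch of loading it from a local checkout (the "./" path is illustrative):

from transformers import RobertaTokenizer

# vocab.json and merges.txt added in this commit live in the repo root.
tokenizer = RobertaTokenizer.from_pretrained("./")
print(tokenizer("example input").input_ids)  # ids wrapped in <s> ... </s>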
training.json ADDED
@@ -0,0 +1,32 @@
+{
+  "epochs": 2,
+  "learning_rate": 1e-05,
+  "model_name": "deepset/tinyroberta-6l-768d",
+  "evaluation": [
+    0.0008178171037303006,
+    0.0007848538627519552,
+    0.9992092289536872,
+    0.0007827766833699012,
+    0.9991875151053535,
+    0.0007897196128086037,
+    0.999194377478458,
+    0.0007847069126737383,
+    0.9991912057171757,
+    0.0007855461690940979,
+    0.9991958935677381,
+    0.000785352314220833,
+    0.9991970388540179,
+    0.0007879251506279772,
+    0.9991914060750224,
+    0.0007884886635244337,
+    0.9992020577928902,
+    0.0007888689475750166,
+    0.9992294733418028,
+    0.0007840781050665324,
+    0.9992103814093027,
+    0.0007828270352403521,
+    0.9992048671582784,
+    0.0007829428568245644,
+    0.9992107893169417
+  ]
+}
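training.json records the fine-tuning setup: 2 epochs at a 1e-05 learning rate on top of deepset/tinyroberta-6l-768d, plus a flat list of evaluation scores. A minimal sketch of reading the file back:

import json

with open("training.json") as f:
    run = json.load(f)

print(run["model_name"], run["epochs"], run["learning_rate"])
scores = run["evaluation"]
# The commit does not say what each score measures; the list simply
# alternates values near 0 and near 1, so only a summary is printed.
print(f"{len(scores)} scores, min={min(scores):.6f}, max={max(scores):.6f}")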
vocab.json ADDED
The diff for this file is too large to render.