asquevedos committed
Commit
3133d17
1 Parent(s): 9226b57

Training in progress, epoch 1

added_tokens.json ADDED
@@ -0,0 +1,3 @@
+{
+  "[MASK]": 250101
+}
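
This registers [MASK] as an added token (id 250101) on top of the new SentencePiece vocabulary. A minimal sketch to confirm the mapping, assuming a local clone of this repo at ./ (the repo id itself is not shown in this diff):

from transformers import AutoTokenizer

# Assumes a local checkout of this repo; added_tokens.json maps
# the mask token to id 250101.
tokenizer = AutoTokenizer.from_pretrained("./")
print(tokenizer.convert_tokens_to_ids("[MASK]"))  # expected: 250101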
config.json CHANGED
@@ -1,12 +1,9 @@
 {
-  "_name_or_path": "xlm-roberta-base",
+  "_name_or_path": "microsoft/mdeberta-v3-base",
   "architectures": [
-    "XLMRobertaForSequenceClassification"
+    "DebertaV2ForSequenceClassification"
   ],
   "attention_probs_dropout_prob": 0.1,
-  "bos_token_id": 0,
-  "classifier_dropout": null,
-  "eos_token_id": 2,
   "hidden_act": "gelu",
   "hidden_dropout_prob": 0.1,
   "hidden_size": 768,
@@ -272,18 +269,27 @@
     "LABEL_98": 98,
     "LABEL_99": 99
   },
-  "layer_norm_eps": 1e-05,
-  "max_position_embeddings": 514,
-  "model_type": "xlm-roberta",
+  "layer_norm_eps": 1e-07,
+  "max_position_embeddings": 512,
+  "max_relative_positions": -1,
+  "model_type": "deberta-v2",
+  "norm_rel_ebd": "layer_norm",
   "num_attention_heads": 12,
   "num_hidden_layers": 12,
-  "output_past": true,
-  "pad_token_id": 1,
-  "position_embedding_type": "absolute",
-  "problem_type": "single_label_classification",
+  "pad_token_id": 0,
+  "pooler_dropout": 0,
+  "pooler_hidden_act": "gelu",
+  "pooler_hidden_size": 768,
+  "pos_att_type": [
+    "p2c",
+    "c2p"
+  ],
+  "position_biased_input": false,
+  "position_buckets": 256,
+  "relative_attention": true,
+  "share_att_key": true,
   "torch_dtype": "float32",
   "transformers_version": "4.39.3",
-  "type_vocab_size": 1,
-  "use_cache": true,
-  "vocab_size": 250002
+  "type_vocab_size": 0,
+  "vocab_size": 251000
 }
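
The base checkpoint moves from xlm-roberta-base to microsoft/mdeberta-v3-base while the 100-label classification head (LABEL_0 through LABEL_99) is kept. A minimal loading sketch, again assuming a local clone at ./:

from transformers import AutoConfig, AutoModelForSequenceClassification

config = AutoConfig.from_pretrained("./")   # picks up model_type "deberta-v2"
assert len(config.id2label) == 100          # LABEL_0 .. LABEL_99

# Weights come from model.safetensors in the same directory.
model = AutoModelForSequenceClassification.from_pretrained("./")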
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ebce5ac3cc6294a0407900ca7d2f1ea59aff16701829f01b6c913771a9e6e62d
-size 1112592592
+oid sha256:ff3b13d3afff1d5cc5e876263fed69a23740ab57a0681cd0d5f0b97f2fcbb304
+size 1115655792
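
Only the Git LFS pointer changes here: the oid is the SHA-256 of the actual weights file, and the size grows with the larger mDeBERTa embedding matrix. A sketch for checking the downloaded file against the pointer, assuming `git lfs pull` has already materialized it:

import hashlib

h = hashlib.sha256()
with open("model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
# Should match the oid recorded in the new pointer:
# ff3b13d3afff1d5cc5e876263fed69a23740ab57a0681cd0d5f0b97f2fcbb304
print(h.hexdigest())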
special_tokens_map.json CHANGED
@@ -1,36 +1,14 @@
 {
-  "cls_token": {
-    "content": "[CLS]",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "mask_token": {
-    "content": "[MASK]",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "pad_token": {
-    "content": "[PAD]",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "sep_token": {
-    "content": "[SEP]",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
+  "bos_token": "[CLS]",
+  "cls_token": "[CLS]",
+  "eos_token": "[SEP]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
   "unk_token": {
     "content": "[UNK]",
     "lstrip": false,
-    "normalized": false,
+    "normalized": true,
     "rstrip": false,
     "single_word": false
   }
spm.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:13c8d666d62a7bc4ac8f040aab68e942c861f93303156cc28f5c7e885d86d6e3
+size 4305025
tokenizer.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:03ba3a5e5da2b79f509f322ec5b8669cad2c4676234a70cd37b634f945c15419
-size 2213583
+oid sha256:f42839ec04c3025630c31f069df13d750b5a3c969345d768ca8e0da9119600c7
+size 16331661
tokenizer_config.json CHANGED
@@ -1,7 +1,7 @@
 {
   "added_tokens_decoder": {
     "0": {
-      "content": "[MASK]",
+      "content": "[PAD]",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
@@ -9,31 +9,31 @@
       "special": true
     },
     "1": {
-      "content": "[PAD]",
+      "content": "[CLS]",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "3": {
-      "content": "[UNK]",
+    "2": {
+      "content": "[SEP]",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "4": {
-      "content": "[CLS]",
+    "3": {
+      "content": "[UNK]",
       "lstrip": false,
-      "normalized": false,
+      "normalized": true,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "5": {
-      "content": "[SEP]",
+    "250101": {
+      "content": "[MASK]",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
@@ -41,17 +41,18 @@
       "special": true
     }
   },
+  "bos_token": "[CLS]",
   "clean_up_tokenization_spaces": true,
   "cls_token": "[CLS]",
-  "do_basic_tokenize": true,
   "do_lower_case": false,
+  "eos_token": "[SEP]",
   "mask_token": "[MASK]",
-  "model_max_length": 512,
-  "never_split": null,
+  "model_max_length": 1000000000000000019884624838656,
   "pad_token": "[PAD]",
   "sep_token": "[SEP]",
-  "strip_accents": false,
-  "tokenize_chinese_chars": true,
-  "tokenizer_class": "BertTokenizer",
-  "unk_token": "[UNK]"
+  "sp_model_kwargs": {},
+  "split_by_punct": false,
+  "tokenizer_class": "DebertaV2Tokenizer",
+  "unk_token": "[UNK]",
+  "vocab_type": "spm"
 }
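
The tokenizer class flips from BertTokenizer to DebertaV2Tokenizer backed by the new spm.model, and model_max_length becomes the "no limit" sentinel instead of 512, so the truncation length has to be passed explicitly at encode time. A minimal sketch, assuming a local clone at ./:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./")
# model_max_length no longer caps inputs, so set it per call:
enc = tokenizer("example text", truncation=True, max_length=512)
print(type(tokenizer).__name__)  # DebertaV2Tokenizer or DebertaV2TokenizerFast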
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:30ee3128d610c94daecd2bd3143ace360d34d42c725f06eb9cde2d4423b5c122
+oid sha256:b3a0a7eca43df258134a69521041b1b8e1dbb39c20ed610e75e349b29c59c14f
 size 4984