dougtrajano committed
Commit 7dbd7d6
Parent: 07d6a9a

Training in progress, epoch 1
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "neuralmind/bert-base-portuguese-cased",
+  "_name_or_path": "neuralmind/bert-large-portuguese-cased",
   "architectures": [
     "ToxicityTypeForSequenceClassification"
   ],
@@ -8,14 +8,14 @@
   "directionality": "bidi",
   "hidden_act": "gelu",
   "hidden_dropout_prob": 0.1,
-  "hidden_size": 768,
+  "hidden_size": 1024,
   "id2label": {
     "0": "INDIVIDUAL",
     "1": "GROUP",
     "2": "OTHER"
   },
   "initializer_range": 0.02,
-  "intermediate_size": 3072,
+  "intermediate_size": 4096,
   "label2id": {
     "GROUP": 1,
     "INDIVIDUAL": 0,
@@ -24,8 +24,8 @@
   "layer_norm_eps": 1e-12,
   "max_position_embeddings": 512,
   "model_type": "bert",
-  "num_attention_heads": 12,
-  "num_hidden_layers": 12,
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
   "output_past": true,
   "pad_token_id": 0,
   "pooler_fc_size": 768,
last-checkpoint/config.json ADDED
@@ -0,0 +1,43 @@
+{
+  "_name_or_path": "neuralmind/bert-large-portuguese-cased",
+  "architectures": [
+    "ToxicityTypeForSequenceClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "directionality": "bidi",
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 1024,
+  "id2label": {
+    "0": "INDIVIDUAL",
+    "1": "GROUP",
+    "2": "OTHER"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "label2id": {
+    "GROUP": 1,
+    "INDIVIDUAL": 0,
+    "OTHER": 2
+  },
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
+  "output_past": true,
+  "pad_token_id": 0,
+  "pooler_fc_size": 768,
+  "pooler_num_attention_heads": 12,
+  "pooler_num_fc_layers": 3,
+  "pooler_size_per_head": 128,
+  "pooler_type": "first_token_transform",
+  "position_embedding_type": "absolute",
+  "problem_type": "single_label_classification",
+  "torch_dtype": "float32",
+  "transformers_version": "4.26.1",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 29794
+}
last-checkpoint/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b924c4df2a7a383c6e21fa2b4994f0707e75646c0353d02238d3531a5866707f
+size 3468339
last-checkpoint/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3ce392739bf37bbf308c0e0d58c24d7f62006911cfb0ccef33aa174cfcc5ab60
+size 1337733933
last-checkpoint/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e70f9bacfce971fc134d218db94ce2540e833beff8b4f6613ef42305ac2b6fee
+size 14503
last-checkpoint/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:80e3bce3c0ea15d7b424ec6761f0c5066770082ab56e25f2ea2827bbbc47848b
+size 623
last-checkpoint/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
last-checkpoint/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
last-checkpoint/tokenizer_config.json ADDED
@@ -0,0 +1,16 @@
+{
+  "cls_token": "[CLS]",
+  "do_basic_tokenize": true,
+  "do_lower_case": false,
+  "mask_token": "[MASK]",
+  "model_max_length": 1000000000000000019884624838656,
+  "name_or_path": "neuralmind/bert-large-portuguese-cased",
+  "never_split": null,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "special_tokens_map_file": "/root/.cache/huggingface/hub/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/special_tokens_map.json",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
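The checkpoint ships a complete tokenizer (vocab, special-tokens map, config), so it can be loaded directly. A minimal sketch, assuming the `transformers` library and a local copy of the committed `last-checkpoint/` directory:

```python
from transformers import AutoTokenizer

# Load the tokenizer files committed under last-checkpoint/ (local path assumed).
tokenizer = AutoTokenizer.from_pretrained("./last-checkpoint")

print(tokenizer.cls_token, tokenizer.sep_token)  # [CLS] [SEP]
enc = tokenizer("Um exemplo em português.", return_tensors="pt")
print(enc["input_ids"].shape)
```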
last-checkpoint/trainer_state.json ADDED
@@ -0,0 +1,28 @@
+{
+  "best_metric": 0.7052333645117603,
+  "best_model_checkpoint": "/opt/ml/checkpoints/toxicity-target-type-identification-2023-02-26-11-44-04-863/model/checkpoint-355",
+  "epoch": 1.0,
+  "global_step": 355,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 0.6902748414376322,
+      "eval_f1": 0.7052333645117603,
+      "eval_loss": 0.7145297527313232,
+      "eval_precision": 0.7527983972979847,
+      "eval_recall": 0.6902748414376322,
+      "eval_runtime": 18.5563,
+      "eval_samples_per_second": 50.98,
+      "eval_steps_per_second": 6.413,
+      "step": 355
+    }
+  ],
+  "max_steps": 10650,
+  "num_train_epochs": 30,
+  "total_flos": 491451660564594.0,
+  "trial_name": null,
+  "trial_params": null
+}
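The trainer state records one evaluation so far: step 355 closes epoch 1 of 30 (355 steps/epoch × 30 epochs = 10650 = max_steps), and best_metric equals the epoch-1 eval_f1, so F1 is the checkpoint-selection metric. A small sketch for inspecting it, assuming a local copy of the file:

```python
import json

# Read the training state committed with the checkpoint (local path assumed).
with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

# 355 steps per epoch * 30 epochs = 10650 total steps.
print(state["global_step"], "/", state["max_steps"])
print("best F1:", state["best_metric"])
for entry in state["log_history"]:
    print(entry["epoch"], entry.get("eval_f1"))
```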
last-checkpoint/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:17fee243adf1d020640843f6197611182ca090919cb15d3455ae60e528b4209a
+size 3695
last-checkpoint/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0ec383c18a5b94a5d739dec9c5befc670c3fc650f033ee271224cc462f76d529
-size 435772781
+oid sha256:3ce392739bf37bbf308c0e0d58c24d7f62006911cfb0ccef33aa174cfcc5ab60
+size 1337733933
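The new blob size is consistent with the architecture change: at float32 (4 bytes per weight), roughly 334M parameters comes to about 1.34 GB, matching the 1,337,733,933-byte file, while the old 435,772,781-byte file matches bert-base's roughly 109M parameters. A back-of-the-envelope check (the parameter counts are approximate published figures for the BERTimbau models, not read from this commit):

```python
# Approximate parameter counts for BERTimbau base/large (assumed figures).
base_params, large_params = 109e6, 334e6

# float32 weights: 4 bytes per parameter.
print(f"base:  ~{base_params * 4 / 1e6:.0f} MB")   # ~436 MB vs 435772781 bytes
print(f"large: ~{large_params * 4 / 1e6:.0f} MB")  # ~1336 MB vs 1337733933 bytes
```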
tokenizer_config.json CHANGED
@@ -4,11 +4,11 @@
   "do_lower_case": false,
   "mask_token": "[MASK]",
   "model_max_length": 1000000000000000019884624838656,
-  "name_or_path": "neuralmind/bert-base-portuguese-cased",
+  "name_or_path": "neuralmind/bert-large-portuguese-cased",
   "never_split": null,
   "pad_token": "[PAD]",
   "sep_token": "[SEP]",
-  "special_tokens_map_file": "/root/.cache/huggingface/hub/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/special_tokens_map.json",
+  "special_tokens_map_file": "/root/.cache/huggingface/hub/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/special_tokens_map.json",
   "strip_accents": null,
   "tokenize_chinese_chars": true,
   "tokenizer_class": "BertTokenizer",
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:29ea1e9a63ff6356100cc945f6ca8285154323a795ec0916bd9de37f42dc85eb
+oid sha256:17fee243adf1d020640843f6197611182ca090919cb15d3455ae60e528b4209a
 size 3695