Gampanut committed on
Commit 4ca6d1e
1 Parent(s): afe9bd2

End of training

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+entity_vocab.json filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,6 +1,6 @@
 ---
 license: apache-2.0
-base_model: google-bert/bert-base-multilingual-cased
+base_model: studio-ousia/mluke-base
 tags:
 - generated_from_trainer
 model-index:
@@ -13,9 +13,9 @@ should probably proofread and complete it, then remove this comment. -->
 
 # exp_number1
 
-This model is a fine-tuned version of [google-bert/bert-base-multilingual-cased](https://huggingface.co/google-bert/bert-base-multilingual-cased) on the None dataset.
+This model is a fine-tuned version of [studio-ousia/mluke-base](https://huggingface.co/studio-ousia/mluke-base) on the None dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.2623
+- Loss: 0.2131
 
 ## Model description
 
@@ -34,28 +34,26 @@ More information needed
 ### Training hyperparameters
 
 The following hyperparameters were used during training:
-- learning_rate: 1e-05
-- train_batch_size: 8
+- learning_rate: 5e-05
+- train_batch_size: 4
 - eval_batch_size: 8
 - seed: 42
+- gradient_accumulation_steps: 4
+- total_train_batch_size: 16
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: linear
-- num_epochs: 10
+- lr_scheduler_warmup_ratio: 0.1
+- num_epochs: 5
 
 ### Training results
 
 | Training Loss | Epoch | Step | Validation Loss |
 |:-------------:|:-----:|:----:|:---------------:|
-| 0.3388        | 1.0   | 443  | 0.4651          |
-| 0.3435        | 2.0   | 886  | 0.3720          |
-| 0.2449        | 3.0   | 1329 | 0.3122          |
-| 0.1175        | 4.0   | 1772 | 0.3218          |
-| 0.1389        | 5.0   | 2215 | 0.3442          |
-| 0.149         | 6.0   | 2658 | 0.2398          |
-| 0.1102        | 7.0   | 3101 | 0.2547          |
-| 0.0479        | 8.0   | 3544 | 0.2352          |
-| 0.0651        | 9.0   | 3987 | 0.2485          |
-| 0.0806        | 10.0  | 4430 | 0.2623          |
+| 0.4351        | 1.0   | 221  | 0.5519          |
+| 0.3954        | 2.0   | 443  | 0.3151          |
+| 0.2607        | 3.0   | 664  | 0.3064          |
+| 0.222         | 4.0   | 886  | 0.2342          |
+| 0.1296        | 4.99  | 1105 | 0.2131          |
 
 
 ### Framework versions
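The switch from the mBERT run to the mLUKE run halves the optimizer steps per epoch (221 vs. 443) because the effective batch doubles: 4 per device × 4 gradient-accumulation steps = 16. A minimal sketch of the same settings with Hugging Face `TrainingArguments` (the training script itself is not part of this commit, and `output_dir` is a hypothetical name):

```python
# Sketch reproducing the hyperparameters listed in the updated model card.
# Only output_dir is invented; every other value comes from the README diff.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="exp_number1",          # hypothetical, named after the model card
    learning_rate=5e-5,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=8,
    gradient_accumulation_steps=4,     # 4 x 4 = effective train batch of 16
    num_train_epochs=5,
    lr_scheduler_type="linear",
    warmup_ratio=0.1,
    seed=42,
)
```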
added_tokens.json ADDED
@@ -0,0 +1,4 @@
+{
+  "<ent2>": 250003,
+  "<ent>": 250002
+}
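The two markers extend the 250,002-token XLM-R vocabulary that mLUKE inherits, which is why they land at ids 250002 and 250003. A quick check (a sketch; the fine-tuned repo id is not shown in this commit, so the base checkpoint stands in):

```python
# Verify the ids recorded in added_tokens.json against the base tokenizer.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("studio-ousia/mluke-base")
assert tok.convert_tokens_to_ids("<ent>") == 250002
assert tok.convert_tokens_to_ids("<ent2>") == 250003
```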
config.json CHANGED
@@ -1,32 +1,37 @@
 {
-  "_name_or_path": "google-bert/bert-base-multilingual-cased",
+  "_name_or_path": "studio-ousia/mluke-base",
   "architectures": [
-    "BertForSequenceClassification"
+    "LukeForSequenceClassification"
   ],
   "attention_probs_dropout_prob": 0.1,
+  "bert_model_name": "xlm-roberta-base",
+  "bos_token_id": 0,
   "classifier_dropout": null,
-  "directionality": "bidi",
+  "cls_entity_prediction": false,
+  "deepspeed_transformer_layer_args": {},
+  "entity_emb_size": 256,
+  "entity_vocab_size": 1200001,
+  "eos_token_id": 2,
+  "gradient_checkpointing": false,
   "hidden_act": "gelu",
   "hidden_dropout_prob": 0.1,
   "hidden_size": 768,
   "initializer_range": 0.02,
   "intermediate_size": 3072,
-  "layer_norm_eps": 1e-12,
-  "max_position_embeddings": 512,
-  "model_type": "bert",
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 514,
+  "model_type": "luke",
   "num_attention_heads": 12,
   "num_hidden_layers": 12,
-  "pad_token_id": 0,
-  "pooler_fc_size": 768,
-  "pooler_num_attention_heads": 12,
-  "pooler_num_fc_layers": 3,
-  "pooler_size_per_head": 128,
-  "pooler_type": "first_token_transform",
+  "output_past": true,
+  "pad_token_id": 1,
   "position_embedding_type": "absolute",
   "problem_type": "single_label_classification",
   "torch_dtype": "float32",
   "transformers_version": "4.39.3",
-  "type_vocab_size": 2,
+  "type_vocab_size": 1,
   "use_cache": true,
-  "vocab_size": 119547
+  "use_deepspeed_transformer_layer": false,
+  "use_entity_aware_attention": false,
+  "vocab_size": 250004
 }
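The config swap replaces `BertForSequenceClassification` over a 119,547-token WordPiece vocabulary with `LukeForSequenceClassification` over XLM-R's 250,004-token SentencePiece vocabulary plus a 1,200,001-entry entity vocabulary. A loading sketch under the same assumptions as above (base checkpoint as stand-in; the diff does not record the label count):

```python
# Load the architecture the new config declares; num_labels is an assumption.
from transformers import LukeForSequenceClassification

model = LukeForSequenceClassification.from_pretrained(
    "studio-ousia/mluke-base",
    num_labels=2,                       # assumed; the diff omits id2label
)
print(model.config.model_type)          # "luke"
print(model.config.entity_vocab_size)   # 1200001
```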
entity_vocab.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ebeabade0acbb5f6fb6562490c081441746f7bf9bd06a3fa8d86b0bce7461c8d
+size 336948106
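The new `.gitattributes` rule above routes this file through Git LFS, so the committed content is a three-line pointer rather than the 337 MB JSON itself. A sketch of reading such a pointer (the local path assumes a cloned repo before `git lfs pull`):

```python
# Parse a Git LFS pointer file: "version", "oid", and "size" key-value lines.
from pathlib import Path

fields = dict(
    line.split(" ", 1)
    for line in Path("entity_vocab.json").read_text().splitlines()
)
print(fields["oid"], fields["size"])  # sha256 digest and byte count of the payload
```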
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bb5534fe2da667670d4573836309d688d6c60ac1cfe278ad584d439866e4efc5
-size 711443456
+oid sha256:a8eb5efbee3c159e9bfe0f109f5bf50badf6de76910b6f5f20adc7abd2f7c600
+size 2343386936
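The checkpoint grows from ~0.71 GB to ~2.34 GB, which is roughly what the new config predicts once the entity table is counted. A back-of-envelope check (assuming float32, 4 bytes per parameter; all figures from config.json):

```python
# Rough float32 size of the two embedding tables added/enlarged by mLUKE.
entity_emb = 1_200_001 * 256 * 4   # entity embeddings: ~1.23 GB
word_emb = 250_004 * 768 * 4       # XLM-R-sized word embeddings: ~0.77 GB
print(entity_emb + word_emb)       # ~2.0 GB; the 12 encoder layers add the rest
```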
sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
+size 5069051
special_tokens_map.json CHANGED
@@ -1,7 +1,45 @@
 {
-  "cls_token": "[CLS]",
-  "mask_token": "[MASK]",
-  "pad_token": "[PAD]",
-  "sep_token": "[SEP]",
-  "unk_token": "[UNK]"
+  "additional_special_tokens": [
+    {
+      "content": "<ent>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "<ent2>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "<ent>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "<ent2>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false
+    }
+  ],
+  "bos_token": "<s>",
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "mask_token": {
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "unk_token": "<unk>"
 }
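Note that the new map declares `<mask>` with `lstrip: true`, so the tokenizer absorbs the space in front of a mask into the token itself. A minimal sketch (the example sentence is illustrative, not from the commit):

```python
# The <mask> AddedToken has lstrip=True, so "is <mask>" tokenizes without a
# stray whitespace piece before the mask.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("studio-ousia/mluke-base")
print(tok.tokenize("The capital is <mask>."))
```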
tokenizer_config.json CHANGED
@@ -1,55 +1,102 @@
 {
   "added_tokens_decoder": {
     "0": {
-      "content": "[PAD]",
+      "content": "<s>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "100": {
-      "content": "[UNK]",
+    "1": {
+      "content": "<pad>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "101": {
-      "content": "[CLS]",
+    "2": {
+      "content": "</s>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "102": {
-      "content": "[SEP]",
+    "3": {
+      "content": "<unk>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "103": {
-      "content": "[MASK]",
+    "250001": {
+      "content": "<mask>",
+      "lstrip": true,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "250002": {
+      "content": "<ent>",
       "lstrip": false,
-      "normalized": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "250003": {
+      "content": "<ent2>",
+      "lstrip": false,
+      "normalized": true,
       "rstrip": false,
       "single_word": false,
       "special": true
     }
   },
+  "additional_special_tokens": [
+    "<ent>",
+    "<ent2>",
+    "<ent>",
+    "<ent2>"
+  ],
+  "bos_token": "<s>",
   "clean_up_tokenization_spaces": true,
-  "cls_token": "[CLS]",
-  "do_lower_case": false,
-  "mask_token": "[MASK]",
+  "cls_token": "<s>",
+  "entity_mask2_token": "[MASK2]",
+  "entity_mask_token": "[MASK]",
+  "entity_pad_token": "[PAD]",
+  "entity_token_1": {
+    "__type": "AddedToken",
+    "content": "<ent>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false,
+    "special": false
+  },
+  "entity_token_2": {
+    "__type": "AddedToken",
+    "content": "<ent2>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false,
+    "special": false
+  },
+  "entity_unk_token": "[UNK]",
+  "eos_token": "</s>",
+  "mask_token": "<mask>",
+  "max_entity_length": 32,
+  "max_mention_length": 30,
   "model_max_length": 512,
-  "pad_token": "[PAD]",
-  "sep_token": "[SEP]",
-  "strip_accents": null,
-  "tokenize_chinese_chars": true,
-  "tokenizer_class": "BertTokenizer",
-  "unk_token": "[UNK]"
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "sp_model_kwargs": {},
+  "task": null,
+  "tokenizer_class": "MLukeTokenizer",
+  "unk_token": "<unk>"
 }
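The entity-specific settings (`entity_*_token`, `max_entity_length: 32`, `max_mention_length: 30`) come into play when mentions are passed as character spans. A usage sketch against the base checkpoint (the sentence and spans are illustrative):

```python
# MLukeTokenizer accepts character-level entity_spans alongside the text;
# spans are resolved against entity_vocab.json and padded to max_entity_length.
from transformers import MLukeTokenizer

tok = MLukeTokenizer.from_pretrained("studio-ousia/mluke-base")
text = "Beyoncé lives in Los Angeles."
enc = tok(text, entity_spans=[(0, 7), (17, 28)], return_tensors="pt")
print(enc["entity_ids"].shape)
```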
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2b22469dc7aac031b98bea37e4cfb674f9e1c73c99bd20848e6075e1a8d536f5
-size 4856
+oid sha256:5e6e2251e262f9cd187f4ffee4f240581585b807a7b2e3491f7a8b515338a391
+size 4920