BDAIO committed on
Commit f1e1bd2 · verified · 1 Parent(s): cbf3639

End of training

Files changed (7)
  1. .gitattributes +1 -0
  2. README.md +73 -0
  3. config.json +46 -0
  4. fasttext_model.bin +3 -0
  5. model.safetensors +3 -0
  6. training_args.bin +3 -0
  7. vocab.txt +3 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ vocab.txt filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,73 @@
+ ---
+ license: apache-2.0
+ base_model: google-bert/bert-base-multilingual-uncased
+ tags:
+ - generated_from_trainer
+ metrics:
+ - accuracy
+ - precision
+ - recall
+ - f1
+ model-index:
+ - name: NLP_90_1
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # NLP_90_1
+
+ This model is a fine-tuned version of [google-bert/bert-base-multilingual-uncased](https://huggingface.co/google-bert/bert-base-multilingual-uncased) on an unspecified dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.3865
+ - Accuracy: 0.9037
+ - Precision: 0.8977
+ - Recall: 0.9047
+ - F1: 0.9000
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 1e-05
+ - train_batch_size: 32
+ - eval_batch_size: 32
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - num_epochs: 8
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy | Precision | Recall | F1     |
+ |:-------------:|:-----:|:----:|:---------------:|:--------:|:---------:|:------:|:------:|
+ | 0.3492        | 1.0   | 48   | 0.3957          | 0.8853   | 0.8792    | 0.8866 | 0.8779 |
+ | 0.1968        | 2.0   | 96   | 0.3972          | 0.8761   | 0.8731    | 0.8681 | 0.8695 |
+ | 0.2027        | 3.0   | 144  | 0.3901          | 0.8945   | 0.8872    | 0.8907 | 0.8873 |
+ | 0.1707        | 4.0   | 192  | 0.3775          | 0.8899   | 0.8930    | 0.8793 | 0.8832 |
+ | 0.1673        | 5.0   | 240  | 0.4176          | 0.8991   | 0.8921    | 0.8997 | 0.8944 |
+ | 0.1113        | 6.0   | 288  | 0.4009          | 0.9037   | 0.8977    | 0.9047 | 0.9000 |
+ | 0.1084        | 7.0   | 336  | 0.3822          | 0.9037   | 0.8977    | 0.9047 | 0.9000 |
+ | 0.131         | 8.0   | 384  | 0.3865          | 0.9037   | 0.8977    | 0.9047 | 0.9000 |
+
+
+ ### Framework versions
+
+ - Transformers 4.42.4
+ - PyTorch 2.3.1+cu121
+ - Datasets 2.20.0
+ - Tokenizers 0.19.1
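The hyperparameters in the card above map directly onto the `transformers` Trainer API. The following is a minimal reproduction sketch, not the author's actual training script: the dataset objects, their column names, and the macro averaging of precision/recall/F1 are assumptions, since the card leaves them unspecified.

```python
# Hypothetical reproduction sketch based only on the hyperparameters listed in the card.
# The train/eval dataset objects and the metric averaging are assumptions.
import numpy as np
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    Trainer,
    TrainingArguments,
)

base = "google-bert/bert-base-multilingual-uncased"
tokenizer = AutoTokenizer.from_pretrained(base)
model = AutoModelForSequenceClassification.from_pretrained(base, num_labels=5)

def compute_metrics(eval_pred):
    logits, labels = eval_pred
    preds = np.argmax(logits, axis=-1)
    precision, recall, f1, _ = precision_recall_fscore_support(
        labels, preds, average="macro", zero_division=0
    )
    return {
        "accuracy": accuracy_score(labels, preds),
        "precision": precision,
        "recall": recall,
        "f1": f1,
    }

args = TrainingArguments(
    output_dir="NLP_90_1",
    learning_rate=1e-5,              # Adam betas/epsilon stay at the defaults listed in the card
    per_device_train_batch_size=32,
    per_device_eval_batch_size=32,
    seed=42,
    lr_scheduler_type="cosine",
    num_train_epochs=8,
    eval_strategy="epoch",           # one evaluation per epoch, matching the results table
)

trainer = Trainer(
    model=model,
    args=args,
    train_dataset=train_dataset,     # assumed: pre-tokenized datasets.Dataset objects,
    eval_dataset=eval_dataset,       # not part of this repository
    tokenizer=tokenizer,
    compute_metrics=compute_metrics,
)
trainer.train()
```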
config.json ADDED
@@ -0,0 +1,46 @@
+ {
+   "_name_or_path": "google-bert/bert-base-multilingual-uncased",
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "directionality": "bidi",
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1",
+     "2": "LABEL_2",
+     "3": "LABEL_3",
+     "4": "LABEL_4"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "LABEL_0": 0,
+     "LABEL_1": 1,
+     "LABEL_2": 2,
+     "LABEL_3": 3,
+     "LABEL_4": 4
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "pooler_fc_size": 768,
+   "pooler_num_attention_heads": 12,
+   "pooler_num_fc_layers": 3,
+   "pooler_size_per_head": 128,
+   "pooler_type": "first_token_transform",
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.42.4",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 548133
+ }
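The config pins a 5-way classification head with generic label names (`LABEL_0` … `LABEL_4`) and an extended 548,133-entry vocabulary. A minimal inference sketch follows; the repo id `BDAIO/NLP_90_1` is inferred from the commit author and model name and is not stated anywhere in the files above, and the real-world meaning of the five labels is not documented.

```python
# Minimal inference sketch. The repo id "BDAIO/NLP_90_1" is an assumption;
# the class names behind LABEL_0..LABEL_4 are not documented in this repo.
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

repo_id = "BDAIO/NLP_90_1"  # assumed repo id
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForSequenceClassification.from_pretrained(repo_id)

text = "Example sentence to classify."
inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=512)
with torch.no_grad():
    logits = model(**inputs).logits

pred = logits.argmax(dim=-1).item()
print(model.config.id2label[pred])  # e.g. "LABEL_3"
```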
fasttext_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a1da72d5db2d908f77731aaba34cc4b9049fc57bfa9b707d6b5dd062cc0f841b
+ size 9534119835
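The commit also ships a ~9.5 GB fastText binary whose role is not described in the card. Assuming it is a standard fastText model, it could be opened with the official `fasttext` package once the LFS object (not the pointer shown above) has been pulled:

```python
# Hedged sketch: the purpose of fasttext_model.bin is not documented in this repo.
# This only shows how a standard fastText binary would be opened and inspected.
import fasttext

ft = fasttext.load_model("fasttext_model.bin")  # requires the real LFS object, not the pointer file
print(ft.get_dimension())           # embedding dimension
print(ft.get_words()[:10])          # first few vocabulary entries
# If it is a supervised classifier, predictions would look like:
# labels, probs = ft.predict("some input text", k=3)
```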
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:28cf8a9e9e10ba19e6805972947acd180f2004d3e4799a75464fc2128ddfd0e2
+ size 2028069268
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c695a0edc9e85d96bf55c0d24cb137b0a8845db35ba21384c872c332f920f9c6
+ size 5112
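`training_args.bin` is the `TrainingArguments` object that the `Trainer` pickles next to the weights. Assuming a compatible Transformers/PyTorch environment, the exact run configuration can be recovered from it:

```python
# Sketch: inspect the saved TrainingArguments (a pickled object, so only
# load it from a source you trust). Requires a compatible transformers version.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs, args.lr_scheduler_type)
```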
vocab.txt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:52a2c8ad1723b1952d64ad5155b3cc87e13ae15e55cc9c6c2a2c078c7ce90216
+ size 17457360
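The three-line files above are Git LFS pointers: only the spec version, SHA-256 oid, and byte size are stored in Git, while the payloads live in LFS storage. A sketch of resolving the real files through `huggingface_hub`, again under the assumption that the repository is `BDAIO/NLP_90_1`:

```python
# Sketch: resolve the LFS-backed files to local paths via the Hub.
# The repo id is an assumption; adjust it to the actual repository.
from huggingface_hub import hf_hub_download

for filename in ["model.safetensors", "vocab.txt", "fasttext_model.bin"]:
    path = hf_hub_download(repo_id="BDAIO/NLP_90_1", filename=filename)
    print(filename, "->", path)
```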