apwic committed
Commit 25ed3c8
1 parent: 553c34f

Model save

README.md ADDED
@@ -0,0 +1,85 @@
+ ---
+ license: mit
+ base_model: indolem/indobert-base-uncased
+ tags:
+ - generated_from_trainer
+ metrics:
+ - precision
+ - recall
+ - f1
+ - accuracy
+ model-index:
+ - name: nerugm-lora-r2a2d0.1
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # nerugm-lora-r2a2d0.1
+
+ This model is a fine-tuned version of [indolem/indobert-base-uncased](https://huggingface.co/indolem/indobert-base-uncased) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.1332
+ - Precision: 0.7287
+ - Recall: 0.8536
+ - F1: 0.7862
+ - Accuracy: 0.9555
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 16
+ - eval_batch_size: 64
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 20.0
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
+ |:-------------:|:-----:|:-----:|:---------------:|:---------:|:------:|:------:|:--------:|
+ | 0.7886 | 1.0 | 528 | 0.4607 | 0.3243 | 0.0837 | 0.1330 | 0.8597 |
+ | 0.3911 | 2.0 | 1056 | 0.2542 | 0.6081 | 0.6915 | 0.6471 | 0.9293 |
+ | 0.2384 | 3.0 | 1584 | 0.1934 | 0.6527 | 0.7937 | 0.7163 | 0.9376 |
+ | 0.1934 | 4.0 | 2112 | 0.1678 | 0.6880 | 0.8187 | 0.7477 | 0.9446 |
+ | 0.172 | 5.0 | 2640 | 0.1589 | 0.6901 | 0.8373 | 0.7566 | 0.9468 |
+ | 0.1602 | 6.0 | 3168 | 0.1533 | 0.6931 | 0.8489 | 0.7631 | 0.9488 |
+ | 0.1532 | 7.0 | 3696 | 0.1505 | 0.6935 | 0.8559 | 0.7662 | 0.9498 |
+ | 0.1457 | 8.0 | 4224 | 0.1456 | 0.7103 | 0.8536 | 0.7754 | 0.9522 |
+ | 0.1401 | 9.0 | 4752 | 0.1418 | 0.7301 | 0.8536 | 0.7870 | 0.9543 |
+ | 0.1375 | 10.0 | 5280 | 0.1388 | 0.7308 | 0.8582 | 0.7894 | 0.9551 |
+ | 0.1331 | 11.0 | 5808 | 0.1360 | 0.7308 | 0.8582 | 0.7894 | 0.9555 |
+ | 0.1304 | 12.0 | 6336 | 0.1365 | 0.7258 | 0.8536 | 0.7845 | 0.9549 |
+ | 0.1285 | 13.0 | 6864 | 0.1343 | 0.7380 | 0.8512 | 0.7906 | 0.9559 |
+ | 0.1255 | 14.0 | 7392 | 0.1345 | 0.7401 | 0.8605 | 0.7958 | 0.9559 |
+ | 0.1249 | 15.0 | 7920 | 0.1346 | 0.7332 | 0.8605 | 0.7918 | 0.9549 |
+ | 0.1238 | 16.0 | 8448 | 0.1342 | 0.7307 | 0.8559 | 0.7883 | 0.9551 |
+ | 0.1232 | 17.0 | 8976 | 0.1342 | 0.7326 | 0.8582 | 0.7905 | 0.9557 |
+ | 0.1215 | 18.0 | 9504 | 0.1351 | 0.7317 | 0.8605 | 0.7909 | 0.9549 |
+ | 0.1209 | 19.0 | 10032 | 0.1337 | 0.7278 | 0.8559 | 0.7866 | 0.9547 |
+ | 0.1207 | 20.0 | 10560 | 0.1332 | 0.7287 | 0.8536 | 0.7862 | 0.9555 |
+
+
+ ### Framework versions
+
+ - Transformers 4.39.3
+ - Pytorch 2.3.0+cu121
+ - Datasets 2.19.1
+ - Tokenizers 0.15.2
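The card's usage section is still a placeholder, so here is a minimal, hedged sketch of loading the adapter added in this commit. It assumes the adapter was saved with the `adapters` library (consistent with `nerugm-lora/adapter_config.json` below, version 0.2.0) and that the `nerugm-lora/` folder from this repository has been downloaded locally; none of this is stated in the card itself.

```python
# Hedged sketch, not an official usage example from the model card.
# Assumptions: the adapter was saved with the `adapters` library and the
# files under nerugm-lora/ (adapter + prediction head) are available locally.
import torch
import adapters
from transformers import AutoTokenizer, AutoModelForTokenClassification

base = "indolem/indobert-base-uncased"
tokenizer = AutoTokenizer.from_pretrained(base)
model = AutoModelForTokenClassification.from_pretrained(base, num_labels=11)

adapters.init(model)                                # add adapter support to the plain HF model
model.load_adapter("nerugm-lora", set_active=True)  # local path to the saved adapter + head
model.eval()

text = "Budi berkunjung ke Yogyakarta pada hari Senin."  # made-up example sentence
inputs = tokenizer(text, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
pred_ids = logits.argmax(dim=-1)[0].tolist()        # one predicted label id per token
```

Decoding `pred_ids` into NER tags is sketched after `head_config.json` below.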
nerugm-lora/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "config": {
+     "alpha": 2,
+     "architecture": "lora",
+     "attn_matrices": [
+       "q",
+       "v"
+     ],
+     "composition_mode": "add",
+     "dropout": 0.1,
+     "init_weights": "lora",
+     "intermediate_lora": false,
+     "leave_out": [],
+     "output_lora": false,
+     "r": 2,
+     "selfattn_lora": true,
+     "use_gating": false
+   },
+   "config_id": "d5b5be297ec0a7b2",
+   "hidden_size": 768,
+   "model_class": "BertForTokenClassification",
+   "model_name": "indolem/indobert-base-uncased",
+   "model_type": "bert",
+   "name": "nerugm-lora",
+   "version": "0.2.0"
+ }
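The `config` block above maps one-to-one onto a `LoRAConfig` in the `adapters` library. As a hedged illustration (the training script itself is not part of this commit), the same settings could be expressed in code like this:

```python
# Hedged sketch of the LoRA settings recorded in adapter_config.json:
# rank 2, alpha 2, dropout 0.1, applied to the query and value projections
# of self-attention only.
from adapters import LoRAConfig

lora_config = LoRAConfig(
    r=2,                       # low-rank dimension of the update matrices
    alpha=2,                   # scaling factor for the LoRA update
    dropout=0.1,               # dropout on the LoRA path
    attn_matrices=["q", "v"],  # which attention projections get LoRA weights
    selfattn_lora=True,        # LoRA in self-attention ...
    intermediate_lora=False,   # ... but not in the FFN intermediate layer
    output_lora=False,         # ... and not in the FFN output layer
)

# On a model prepared with adapters.init(model), training would roughly be:
#   model.add_adapter("nerugm-lora", config=lora_config)
#   model.train_adapter("nerugm-lora")  # freeze the base model, train only LoRA + head
```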
nerugm-lora/head_config.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "config": null,
+   "hidden_size": 768,
+   "label2id": {
+     "B-LOCATION": 0,
+     "B-ORGANIZATION": 1,
+     "B-PERSON": 2,
+     "B-QUANTITY": 3,
+     "B-TIME": 4,
+     "I-LOCATION": 5,
+     "I-ORGANIZATION": 6,
+     "I-PERSON": 7,
+     "I-QUANTITY": 8,
+     "I-TIME": 9,
+     "O": 10
+   },
+   "model_class": "BertForTokenClassification",
+   "model_name": "indolem/indobert-base-uncased",
+   "model_type": "bert",
+   "name": null,
+   "num_labels": 11,
+   "version": "0.2.0"
+ }
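`head_config.json` fixes the 11-tag BIO label set of the token-classification head. Continuing the hedged loading sketch after the README above (this code is not shipped with the repo), predicted ids can be mapped back to these tags by inverting `label2id`:

```python
# Hedged sketch: decode per-token predictions using the label map above.
label2id = {
    "B-LOCATION": 0, "B-ORGANIZATION": 1, "B-PERSON": 2, "B-QUANTITY": 3,
    "B-TIME": 4, "I-LOCATION": 5, "I-ORGANIZATION": 6, "I-PERSON": 7,
    "I-QUANTITY": 8, "I-TIME": 9, "O": 10,
}
id2label = {i: tag for tag, i in label2id.items()}

tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0])
for token, pred in zip(tokens, pred_ids):
    print(f"{token}\t{id2label[pred]}")  # includes [CLS]/[SEP] and wordpiece tokens
```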
nerugm-lora/pytorch_adapter.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c300a8a7a02e728b4ef0043f7305436e4f704fb2a751c1afbde12fd4e2f76c13
+ size 312614
nerugm-lora/pytorch_model_head.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e211ec28f101918ce92456079143ad1a4cb40b8e14448865512b55dbc3f6a954
+ size 35354
runs/May24_21-10-53_indolem-petl-vm/events.out.tfevents.1716585060.indolem-petl-vm.1721760.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e2427cd49c8f0c174cba48f586f17eab54928c36e9c03582f6600551218442fd
- size 18199
+ oid sha256:cd67d7e7940152899debdea0cce549bc740b2abd6aaa867719d285d907afb023
+ size 19236