apwic committed
Commit 191a3d2
Parent: c627528

Model save
README.md ADDED
@@ -0,0 +1,85 @@
+ ---
+ license: mit
+ base_model: indolem/indobert-base-uncased
+ tags:
+ - generated_from_trainer
+ metrics:
+ - precision
+ - recall
+ - f1
+ - accuracy
+ model-index:
+ - name: nerugm-lora-r8a0d0.1
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # nerugm-lora-r8a0d0.1
+
+ This model is a fine-tuned version of [indolem/indobert-base-uncased](https://huggingface.co/indolem/indobert-base-uncased) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.1278
+ - Precision: 0.7600
+ - Recall: 0.8815
+ - F1: 0.8162
+ - Accuracy: 0.9593
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 16
+ - eval_batch_size: 64
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 20.0
+
+ ### Training results
+
+ | Training Loss | Epoch | Step  | Validation Loss | Precision | Recall | F1     | Accuracy |
+ |:-------------:|:-----:|:-----:|:---------------:|:---------:|:------:|:------:|:--------:|
+ | 0.713         | 1.0   | 528   | 0.3558          | 0.4950    | 0.3736 | 0.4258 | 0.8990   |
+ | 0.2793        | 2.0   | 1056  | 0.1931          | 0.6472    | 0.8048 | 0.7174 | 0.9392   |
+ | 0.1876        | 3.0   | 1584  | 0.1619          | 0.6758    | 0.8466 | 0.7516 | 0.9462   |
+ | 0.1593        | 4.0   | 2112  | 0.1416          | 0.7366    | 0.8629 | 0.7948 | 0.9555   |
+ | 0.1412        | 5.0   | 2640  | 0.1350          | 0.7386    | 0.8652 | 0.7969 | 0.9559   |
+ | 0.1325        | 6.0   | 3168  | 0.1361          | 0.7324    | 0.8698 | 0.7952 | 0.9555   |
+ | 0.126         | 7.0   | 3696  | 0.1383          | 0.7310    | 0.8698 | 0.7944 | 0.9553   |
+ | 0.1194        | 8.0   | 4224  | 0.1349          | 0.7456    | 0.8838 | 0.8088 | 0.9583   |
+ | 0.1137        | 9.0   | 4752  | 0.1299          | 0.7495    | 0.8745 | 0.8072 | 0.9583   |
+ | 0.1112        | 10.0  | 5280  | 0.1285          | 0.7455    | 0.8698 | 0.8029 | 0.9579   |
+ | 0.1065        | 11.0  | 5808  | 0.1304          | 0.7525    | 0.8815 | 0.8119 | 0.9587   |
+ | 0.1044        | 12.0  | 6336  | 0.1329          | 0.7520    | 0.8791 | 0.8106 | 0.9577   |
+ | 0.1026        | 13.0  | 6864  | 0.1257          | 0.7520    | 0.8722 | 0.8076 | 0.9585   |
+ | 0.0989        | 14.0  | 7392  | 0.1265          | 0.7626    | 0.8791 | 0.8167 | 0.9599   |
+ | 0.0982        | 15.0  | 7920  | 0.1281          | 0.7631    | 0.8815 | 0.8180 | 0.9597   |
+ | 0.0974        | 16.0  | 8448  | 0.1264          | 0.7515    | 0.8768 | 0.8093 | 0.9597   |
+ | 0.0966        | 17.0  | 8976  | 0.1282          | 0.7545    | 0.8838 | 0.8140 | 0.9589   |
+ | 0.095         | 18.0  | 9504  | 0.1292          | 0.7570    | 0.8815 | 0.8145 | 0.9589   |
+ | 0.0941        | 19.0  | 10032 | 0.1268          | 0.7585    | 0.8815 | 0.8154 | 0.9595   |
+ | 0.0948        | 20.0  | 10560 | 0.1278          | 0.7600    | 0.8815 | 0.8162 | 0.9593   |
+
+ ### Framework versions
+
+ - Transformers 4.39.3
+ - PyTorch 2.3.0+cu121
+ - Datasets 2.19.1
+ - Tokenizers 0.15.2
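For context (not part of this commit): a minimal sketch of how the hyperparameters in the card above map onto the `transformers` `Trainer` API, assuming the standard Trainer setup the card was generated from. The dataset objects are placeholders, since the card only says "an unknown dataset", and `evaluation_strategy="epoch"` is an assumption inferred from the per-epoch results table; the Adam betas and epsilon listed in the card are the optimizer defaults.

```python
from transformers import (
    AutoModelForTokenClassification,
    AutoTokenizer,
    Trainer,
    TrainingArguments,
)

model_name = "indolem/indobert-base-uncased"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForTokenClassification.from_pretrained(model_name, num_labels=11)

args = TrainingArguments(
    output_dir="nerugm-lora-r8a0d0.1",
    learning_rate=5e-5,               # from the card
    per_device_train_batch_size=16,   # train_batch_size: 16
    per_device_eval_batch_size=64,    # eval_batch_size: 64
    seed=42,
    lr_scheduler_type="linear",
    num_train_epochs=20.0,
    evaluation_strategy="epoch",      # assumption: matches the per-epoch results table
)

# Placeholders: the card does not identify the training data, so real
# tokenized datasets must be supplied before trainer.train() will run.
train_dataset = eval_dataset = None

trainer = Trainer(
    model=model,
    args=args,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    tokenizer=tokenizer,
)
```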
nerugm-lora/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "config": {
+     "alpha": 0,
+     "architecture": "lora",
+     "attn_matrices": [
+       "q",
+       "v"
+     ],
+     "composition_mode": "add",
+     "dropout": 0.1,
+     "init_weights": "lora",
+     "intermediate_lora": false,
+     "leave_out": [],
+     "output_lora": false,
+     "r": 8,
+     "selfattn_lora": true,
+     "use_gating": false
+   },
+   "config_id": "9a6df5ed9051a32d",
+   "hidden_size": 768,
+   "model_class": "BertForTokenClassification",
+   "model_name": "indolem/indobert-base-uncased",
+   "model_type": "bert",
+   "name": "nerugm-lora",
+   "version": "0.2.0"
+ }
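The config keys above match the `adapters` (AdapterHub) library's `LoRAConfig`; the `"version": "0.2.0"` field is that library's format version. A hedged sketch, not from the repo, of how such an adapter could be attached to the base model, with every value mirroring the JSON (including the r=8 / alpha=0 / dropout=0.1 that give the model its `r8a0d0.1` suffix):

```python
import adapters
from adapters import LoRAConfig
from transformers import AutoModelForTokenClassification

model = AutoModelForTokenClassification.from_pretrained(
    "indolem/indobert-base-uncased", num_labels=11
)
adapters.init(model)  # make the plain transformers model adapter-aware

lora_config = LoRAConfig(
    r=8,                       # rank, "r8" in the model name
    alpha=0,                   # "a0" in the model name, as in the saved config
    dropout=0.1,               # "d0.1" in the model name
    attn_matrices=["q", "v"],  # LoRA on query and value projections only
    selfattn_lora=True,
    intermediate_lora=False,
    output_lora=False,
    composition_mode="add",
    init_weights="lora",
    use_gating=False,
)
model.add_adapter("nerugm-lora", config=lora_config)
model.train_adapter("nerugm-lora")  # freeze the base model, train only the LoRA weights
```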
nerugm-lora/head_config.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "config": null,
+   "hidden_size": 768,
+   "label2id": {
+     "B-LOCATION": 0,
+     "B-ORGANIZATION": 1,
+     "B-PERSON": 2,
+     "B-QUANTITY": 3,
+     "B-TIME": 4,
+     "I-LOCATION": 5,
+     "I-ORGANIZATION": 6,
+     "I-PERSON": 7,
+     "I-QUANTITY": 8,
+     "I-TIME": 9,
+     "O": 10
+   },
+   "model_class": "BertForTokenClassification",
+   "model_name": "indolem/indobert-base-uncased",
+   "model_type": "bert",
+   "name": null,
+   "num_labels": 11,
+   "version": "0.2.0"
+ }
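The head config pins down the BIO label inventory (five entity types plus `O`). A small illustrative snippet, not from the repo, showing how the `label2id` mapping above turns token-classification logits back into tag strings; the random `logits` tensor is a stand-in for real model output.

```python
import torch

label2id = {
    "B-LOCATION": 0, "B-ORGANIZATION": 1, "B-PERSON": 2, "B-QUANTITY": 3,
    "B-TIME": 4, "I-LOCATION": 5, "I-ORGANIZATION": 6, "I-PERSON": 7,
    "I-QUANTITY": 8, "I-TIME": 9, "O": 10,
}
id2label = {i: label for label, i in label2id.items()}

# Stand-in for a (batch, seq_len, num_labels) forward-pass output.
logits = torch.randn(1, 6, len(label2id))
predicted_ids = logits.argmax(dim=-1)
predicted_tags = [[id2label[int(i)] for i in row] for row in predicted_ids]
print(predicted_tags)
```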
nerugm-lora/pytorch_adapter.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dba60fec3a5d31ce89fd87b7f10a115a06ca293643355d7e1cc522c48a170ea2
+ size 1197350
nerugm-lora/pytorch_model_head.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:33641fb2d1671be575b6dc68510afd228e40323ef502257c211354de611b4876
+ size 35354
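The two `.bin` files are Git LFS pointers to the trained weights: `pytorch_adapter.bin` (the LoRA parameters) and `pytorch_model_head.bin` (the classification head). A sketch, under the assumption that this repo is checked out locally at `./nerugm-lora`, of loading them back with the `adapters` library; `load_adapter()` reads the config and weight files from the directory and returns the adapter's name.

```python
import adapters
from transformers import AutoModelForTokenClassification

model = AutoModelForTokenClassification.from_pretrained(
    "indolem/indobert-base-uncased", num_labels=11
)
adapters.init(model)

# Path is an assumption: point it at the nerugm-lora/ directory from this commit.
adapter_name = model.load_adapter("./nerugm-lora")
model.set_active_adapters(adapter_name)
```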
runs/May25_01-26-31_indolem-petl-vm/events.out.tfevents.1716600398.indolem-petl-vm.1837709.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:dac28cb931c0230a9b47ae936e42075c3bbea1d88aff8a6bdc702a9829d2688b
- size 18199
+ oid sha256:d202c4cd0cd03868a7ea506c445e0ee678676734dd50de6dda3b330310c85f31
+ size 19236