apwic committed on
Commit db03c13
1 Parent(s): 6c92e6c

Model save
README.md ADDED
@@ -0,0 +1,85 @@
+ ---
+ license: mit
+ base_model: indolem/indobert-base-uncased
+ tags:
+ - generated_from_trainer
+ metrics:
+ - precision
+ - recall
+ - f1
+ - accuracy
+ model-index:
+ - name: nerugm-lora-r8a1d0.05
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # nerugm-lora-r8a1d0.05
+
+ This model is a fine-tuned version of [indolem/indobert-base-uncased](https://huggingface.co/indolem/indobert-base-uncased) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.1266
+ - Precision: 0.7622
+ - Recall: 0.8698
+ - F1: 0.8125
+ - Accuracy: 0.9591
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 16
+ - eval_batch_size: 64
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 20.0
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
+ |:-------------:|:-----:|:-----:|:---------------:|:---------:|:------:|:------:|:--------:|
+ | 0.7039 | 1.0 | 528 | 0.3293 | 0.5553 | 0.4962 | 0.5241 | 0.9123 |
+ | 0.2536 | 2.0 | 1056 | 0.1835 | 0.6530 | 0.8210 | 0.7274 | 0.9424 |
+ | 0.1831 | 3.0 | 1584 | 0.1832 | 0.6678 | 0.8210 | 0.7365 | 0.9440 |
+ | 0.1623 | 4.0 | 2112 | 0.1463 | 0.7213 | 0.8466 | 0.7789 | 0.9535 |
+ | 0.1439 | 5.0 | 2640 | 0.1387 | 0.7173 | 0.8420 | 0.7747 | 0.9541 |
+ | 0.1348 | 6.0 | 3168 | 0.1383 | 0.7256 | 0.8652 | 0.7893 | 0.9553 |
+ | 0.1293 | 7.0 | 3696 | 0.1394 | 0.7242 | 0.8652 | 0.7885 | 0.9545 |
+ | 0.124 | 8.0 | 4224 | 0.1351 | 0.7353 | 0.8698 | 0.7969 | 0.9569 |
+ | 0.1176 | 9.0 | 4752 | 0.1304 | 0.7404 | 0.8536 | 0.7930 | 0.9561 |
+ | 0.1153 | 10.0 | 5280 | 0.1278 | 0.7582 | 0.8582 | 0.8051 | 0.9585 |
+ | 0.111 | 11.0 | 5808 | 0.1304 | 0.7386 | 0.8652 | 0.7969 | 0.9579 |
+ | 0.109 | 12.0 | 6336 | 0.1323 | 0.7415 | 0.8652 | 0.7986 | 0.9565 |
+ | 0.1077 | 13.0 | 6864 | 0.1253 | 0.7649 | 0.8675 | 0.8130 | 0.9597 |
+ | 0.1032 | 14.0 | 7392 | 0.1243 | 0.7639 | 0.8629 | 0.8104 | 0.9593 |
+ | 0.1035 | 15.0 | 7920 | 0.1261 | 0.7664 | 0.8675 | 0.8138 | 0.9597 |
+ | 0.1017 | 16.0 | 8448 | 0.1258 | 0.7470 | 0.8559 | 0.7977 | 0.9577 |
+ | 0.1004 | 17.0 | 8976 | 0.1278 | 0.7576 | 0.8698 | 0.8098 | 0.9589 |
+ | 0.099 | 18.0 | 9504 | 0.1284 | 0.7510 | 0.8675 | 0.8051 | 0.9585 |
+ | 0.0991 | 19.0 | 10032 | 0.1256 | 0.7572 | 0.8605 | 0.8055 | 0.9581 |
+ | 0.0984 | 20.0 | 10560 | 0.1266 | 0.7622 | 0.8698 | 0.8125 | 0.9591 |
+
+
+ ### Framework versions
+
+ - Transformers 4.39.3
+ - Pytorch 2.3.0+cu121
+ - Datasets 2.19.1
+ - Tokenizers 0.15.2
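
The card above reports metrics but no usage snippet. Below is a minimal inference sketch, assuming the adapter was saved with the AdapterHub `adapters` library (version 0.2.0, per `nerugm-lora/adapter_config.json` below); the repo id `apwic/nerugm-lora-r8a1d0.05` and the example sentence are assumptions, not taken from the commit.

```python
# Minimal inference sketch. Assumptions: the adapter was trained with the
# AdapterHub `adapters` library (v0.2.0 per adapter_config.json), and the
# repo id "apwic/nerugm-lora-r8a1d0.05" is hypothetical.
import torch
import adapters
from transformers import AutoTokenizer, AutoModelForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("indolem/indobert-base-uncased")
model = AutoModelForTokenClassification.from_pretrained(
    "indolem/indobert-base-uncased",
    num_labels=11,  # matches head_config.json below
)

adapters.init(model)  # retrofit the HF model with adapter support
adapter_name = model.load_adapter("apwic/nerugm-lora-r8a1d0.05")  # hypothetical repo id
model.set_active_adapters(adapter_name)
model.eval()

inputs = tokenizer("Budi bekerja di Yogyakarta.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape: (1, seq_len, 11)
pred_ids = logits.argmax(dim=-1)[0]
```

Loading this way keeps the frozen base weights from the Hub and only adds the roughly 1.2 MB of LoRA weights stored in `pytorch_adapter.bin` below.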
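The hyperparameter list in the card maps directly onto `transformers.TrainingArguments`. A sketch of that mapping, setting only the values the card reports (the output directory is an assumption; everything else is left at library defaults):

```python
# Reported hyperparameters expressed as TrainingArguments; only values
# listed in the model card are set. output_dir is an assumption.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="nerugm-lora-r8a1d0.05",  # assumed
    learning_rate=5e-5,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=64,
    seed=42,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    lr_scheduler_type="linear",
    num_train_epochs=20.0,
)
```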
nerugm-lora/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "config": {
+     "alpha": 1,
+     "architecture": "lora",
+     "attn_matrices": [
+       "q",
+       "v"
+     ],
+     "composition_mode": "add",
+     "dropout": 0.05,
+     "init_weights": "lora",
+     "intermediate_lora": false,
+     "leave_out": [],
+     "output_lora": false,
+     "r": 8,
+     "selfattn_lora": true,
+     "use_gating": false
+   },
+   "config_id": "bb949181a19f3fc7",
+   "hidden_size": 768,
+   "model_class": "BertForTokenClassification",
+   "model_name": "indolem/indobert-base-uncased",
+   "model_type": "bert",
+   "name": "nerugm-lora",
+   "version": "0.2.0"
+ }
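
The `config` block above corresponds field-for-field to the library's `LoRAConfig`. A hedged reconstruction, for anyone recreating the adapter for training (the commented calls at the end are illustrative, not part of this commit):

```python
# Reconstruction of the adapter_config.json "config" block as a LoRAConfig.
# Field names follow the JSON above.
from adapters import LoRAConfig

lora_config = LoRAConfig(
    selfattn_lora=True,        # apply LoRA inside self-attention
    intermediate_lora=False,
    output_lora=False,
    leave_out=[],              # no layers excluded
    r=8,                       # rank of the low-rank update
    alpha=1,                   # scaling numerator (effective scale alpha / r)
    dropout=0.05,
    attn_matrices=["q", "v"],  # query and value projections only
    composition_mode="add",
    init_weights="lora",
    use_gating=False,
)

# adapters.init(model)
# model.add_adapter("nerugm-lora", config=lora_config)
# model.train_adapter("nerugm-lora")
```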
nerugm-lora/head_config.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "config": null,
+   "hidden_size": 768,
+   "label2id": {
+     "B-LOCATION": 0,
+     "B-ORGANIZATION": 1,
+     "B-PERSON": 2,
+     "B-QUANTITY": 3,
+     "B-TIME": 4,
+     "I-LOCATION": 5,
+     "I-ORGANIZATION": 6,
+     "I-PERSON": 7,
+     "I-QUANTITY": 8,
+     "I-TIME": 9,
+     "O": 10
+   },
+   "model_class": "BertForTokenClassification",
+   "model_name": "indolem/indobert-base-uncased",
+   "model_type": "bert",
+   "name": null,
+   "num_labels": 11,
+   "version": "0.2.0"
+ }
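
The `label2id` map defines an 11-tag BIO scheme over five entity types (LOCATION, ORGANIZATION, PERSON, QUANTITY, TIME). Inverting it is all that is needed to decode the argmax predictions from the inference sketch above:

```python
# Invert head_config.json's label2id to map predicted indices to BIO tags.
label2id = {
    "B-LOCATION": 0, "B-ORGANIZATION": 1, "B-PERSON": 2, "B-QUANTITY": 3,
    "B-TIME": 4, "I-LOCATION": 5, "I-ORGANIZATION": 6, "I-PERSON": 7,
    "I-QUANTITY": 8, "I-TIME": 9, "O": 10,
}
id2label = {i: tag for tag, i in label2id.items()}

# e.g. continuing the inference sketch above:
# tags = [id2label[int(i)] for i in pred_ids]
```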
nerugm-lora/pytorch_adapter.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9d54fce02ea2f0f392da86beaae26111420451b34a66ed96db6f9e22e1d4af18
+ size 1197350
nerugm-lora/pytorch_model_head.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:07fb0806e66bc1c8f18190a7c56465f23a8a32c4abf3cc812899d62aadac9001
+ size 35354
runs/May25_02-09-00_indolem-petl-vm/events.out.tfevents.1716602948.indolem-petl-vm.1863087.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5a023d1c5b01415302d4bc58a296ed11a6b329a037c43b4ee9b8a168db52559e
- size 18202
+ oid sha256:3e937bb0ee713148c8dacaab3d72c19c6106fb83863c325786d93f021eca203b
+ size 19239