apwic committed on
Commit
d820851
1 Parent(s): 8174260

Model save

README.md ADDED
@@ -0,0 +1,85 @@
+ ---
+ license: mit
+ base_model: indolem/indobert-base-uncased
+ tags:
+ - generated_from_trainer
+ metrics:
+ - precision
+ - recall
+ - f1
+ - accuracy
+ model-index:
+ - name: nerugm-lora-r8a0d0.15
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # nerugm-lora-r8a0d0.15
+
+ This model is a fine-tuned version of [indolem/indobert-base-uncased](https://huggingface.co/indolem/indobert-base-uncased) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.1281
+ - Precision: 0.7470
+ - Recall: 0.8629
+ - F1: 0.8008
+ - Accuracy: 0.9579
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 16
+ - eval_batch_size: 64
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 20.0
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
+ |:-------------:|:-----:|:-----:|:---------------:|:---------:|:------:|:------:|:--------:|
+ | 0.7018 | 1.0 | 528 | 0.3353 | 0.5529 | 0.4800 | 0.5138 | 0.9115 |
+ | 0.2639 | 2.0 | 1056 | 0.1912 | 0.6494 | 0.8210 | 0.7252 | 0.9412 |
+ | 0.1862 | 3.0 | 1584 | 0.1672 | 0.6739 | 0.8536 | 0.7531 | 0.9466 |
+ | 0.1612 | 4.0 | 2112 | 0.1446 | 0.7238 | 0.8512 | 0.7824 | 0.9539 |
+ | 0.1439 | 5.0 | 2640 | 0.1390 | 0.7254 | 0.8582 | 0.7863 | 0.9545 |
+ | 0.1358 | 6.0 | 3168 | 0.1392 | 0.7256 | 0.8652 | 0.7893 | 0.9551 |
+ | 0.129 | 7.0 | 3696 | 0.1384 | 0.7267 | 0.8698 | 0.7919 | 0.9561 |
+ | 0.1228 | 8.0 | 4224 | 0.1339 | 0.7353 | 0.8698 | 0.7969 | 0.9575 |
+ | 0.1168 | 9.0 | 4752 | 0.1321 | 0.7439 | 0.8559 | 0.7960 | 0.9577 |
+ | 0.1146 | 10.0 | 5280 | 0.1300 | 0.7445 | 0.8582 | 0.7973 | 0.9581 |
+ | 0.1105 | 11.0 | 5808 | 0.1327 | 0.7333 | 0.8675 | 0.7948 | 0.9571 |
+ | 0.1083 | 12.0 | 6336 | 0.1333 | 0.7342 | 0.8652 | 0.7943 | 0.9569 |
+ | 0.106 | 13.0 | 6864 | 0.1265 | 0.7490 | 0.8582 | 0.7999 | 0.9591 |
+ | 0.1032 | 14.0 | 7392 | 0.1269 | 0.7445 | 0.8582 | 0.7973 | 0.9589 |
+ | 0.1023 | 15.0 | 7920 | 0.1291 | 0.7455 | 0.8629 | 0.7999 | 0.9585 |
+ | 0.1014 | 16.0 | 8448 | 0.1271 | 0.7400 | 0.8582 | 0.7947 | 0.9575 |
+ | 0.1002 | 17.0 | 8976 | 0.1281 | 0.7460 | 0.8722 | 0.8042 | 0.9589 |
+ | 0.0986 | 18.0 | 9504 | 0.1304 | 0.7416 | 0.8722 | 0.8016 | 0.9573 |
+ | 0.0978 | 19.0 | 10032 | 0.1271 | 0.7520 | 0.8652 | 0.8046 | 0.9589 |
+ | 0.0984 | 20.0 | 10560 | 0.1281 | 0.7470 | 0.8629 | 0.8008 | 0.9579 |
+
+ ### Framework versions
+
+ - Transformers 4.39.3
+ - PyTorch 2.3.0+cu121
+ - Datasets 2.19.1
+ - Tokenizers 0.15.2
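
This commit saves a LoRA adapter for the `adapters` library (see `nerugm-lora/adapter_config.json` below), not a full model checkpoint. A minimal loading sketch, assuming the `adapters` package is installed and this repo has been cloned locally so that `./nerugm-lora` exists:

```python
# A minimal inference-loading sketch; the local path "./nerugm-lora" matches
# the adapter directory added in this commit.
import adapters
from transformers import AutoModelForTokenClassification, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("indolem/indobert-base-uncased")
model = AutoModelForTokenClassification.from_pretrained(
    "indolem/indobert-base-uncased", num_labels=11
)

adapters.init(model)                        # retrofit adapter support onto the HF model
name = model.load_adapter("./nerugm-lora")  # reads adapter_config.json + weights
model.set_active_adapters(name)
```

Since the directory also contains `head_config.json` and `pytorch_model_head.bin`, `load_adapter` should restore the token-classification head alongside the LoRA weights.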
nerugm-lora/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "config": {
+     "alpha": 0,
+     "architecture": "lora",
+     "attn_matrices": [
+       "q",
+       "v"
+     ],
+     "composition_mode": "add",
+     "dropout": 0.15,
+     "init_weights": "lora",
+     "intermediate_lora": false,
+     "leave_out": [],
+     "output_lora": false,
+     "r": 8,
+     "selfattn_lora": true,
+     "use_gating": false
+   },
+   "config_id": "c75f72d9b053e5c8",
+   "hidden_size": 768,
+   "model_class": "BertForTokenClassification",
+   "model_name": "indolem/indobert-base-uncased",
+   "model_type": "bert",
+   "name": "nerugm-lora",
+   "version": "0.2.0"
+ }
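
The adapter name `nerugm-lora-r8a0d0.15` encodes this config: r=8, alpha=0, dropout=0.15, with LoRA applied only to the query and value projections of self-attention. As a sketch, the stored `config` block maps field-for-field onto a `LoRAConfig` from the `adapters` library (assuming a library version compatible with config version 0.2.0):

```python
from adapters import LoRAConfig

# Field-for-field reconstruction of the "config" block above.
lora_config = LoRAConfig(
    selfattn_lora=True,        # LoRA inside self-attention layers
    attn_matrices=["q", "v"],  # only query and value projections
    intermediate_lora=False,
    output_lora=False,
    r=8,                       # low-rank dimension
    alpha=0,                   # scaling numerator, as stored in this commit
    dropout=0.15,
    composition_mode="add",
    init_weights="lora",
    use_gating=False,
)
```

The same object could then be attached for retraining via `model.add_adapter("nerugm-lora", config=lora_config)`.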
nerugm-lora/head_config.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "config": null,
+   "hidden_size": 768,
+   "label2id": {
+     "B-LOCATION": 0,
+     "B-ORGANIZATION": 1,
+     "B-PERSON": 2,
+     "B-QUANTITY": 3,
+     "B-TIME": 4,
+     "I-LOCATION": 5,
+     "I-ORGANIZATION": 6,
+     "I-PERSON": 7,
+     "I-QUANTITY": 8,
+     "I-TIME": 9,
+     "O": 10
+   },
+   "model_class": "BertForTokenClassification",
+   "model_name": "indolem/indobert-base-uncased",
+   "model_type": "bert",
+   "name": null,
+   "num_labels": 11,
+   "version": "0.2.0"
+ }
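
The head covers 11 BIO tags over five entity types (LOCATION, ORGANIZATION, PERSON, QUANTITY, TIME). A small decoding sketch, with `label2id` copied verbatim from the file above:

```python
# Invert the stored label2id mapping to turn per-token class indices
# (the argmax of the head's logits) back into BIO tag strings.
label2id = {
    "B-LOCATION": 0, "B-ORGANIZATION": 1, "B-PERSON": 2, "B-QUANTITY": 3,
    "B-TIME": 4, "I-LOCATION": 5, "I-ORGANIZATION": 6, "I-PERSON": 7,
    "I-QUANTITY": 8, "I-TIME": 9, "O": 10,
}
id2label = {v: k for k, v in label2id.items()}

def ids_to_tags(pred_ids):
    """Map a sequence of per-token class indices to BIO tag strings."""
    return [id2label[int(i)] for i in pred_ids]

print(ids_to_tags([2, 7, 10, 0]))  # ['B-PERSON', 'I-PERSON', 'O', 'B-LOCATION']
```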
nerugm-lora/pytorch_adapter.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c739fa13c835de50731fc6ed2cfbc45d06f46b5236f24f3c27b089556e87440a
+ size 1197350
nerugm-lora/pytorch_model_head.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bd0f37b0b97491ca95558f8804978bfc1b3b10a53645c5f9fbbab42113cca4a7
+ size 35354
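
Both `.bin` entries are git-lfs pointer files (spec v1): the repo itself stores only a sha256 and byte size, while the ~1.2 MB adapter weights and ~35 KB head weights live in LFS storage. A sketch of fetching them directly, assuming this commit is published under the hypothetical repo id `apwic/nerugm-lora-r8a0d0.15`:

```python
from huggingface_hub import hf_hub_download

# hf_hub_download resolves LFS pointers to the actual binary objects.
adapter_path = hf_hub_download(
    repo_id="apwic/nerugm-lora-r8a0d0.15",       # assumed repo id
    filename="nerugm-lora/pytorch_adapter.bin",
)
head_path = hf_hub_download(
    repo_id="apwic/nerugm-lora-r8a0d0.15",
    filename="nerugm-lora/pytorch_model_head.bin",
)
```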
runs/May25_01-47-45_indolem-petl-vm/events.out.tfevents.1716601672.indolem-petl-vm.1850316.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3651eeaad2fe9840e200f50edd7a74bf8c41428d73f9ddc98373c1b006d8f4f3
- size 18202
+ oid sha256:17f0dfcb8cf960c6989138e8ddb5d012f6555c947db970b1ebbdbefdbe79ba5e
+ size 19239