gyr66 committed
Commit be1c1b2
1 Parent(s): d550182

End of training
README.md ADDED
@@ -0,0 +1,74 @@
+ ---
+ base_model: gyr66/RoBERTa-ext-large-chinese-finetuned-ner
+ tags:
+ - generated_from_trainer
+ metrics:
+ - precision
+ - recall
+ - f1
+ - accuracy
+ model-index:
+ - name: RoBERTa-ext-large-lora-updated-chinese-finetuned-ner
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # RoBERTa-ext-large-lora-updated-chinese-finetuned-ner
+
+ This model is a fine-tuned version of [gyr66/RoBERTa-ext-large-chinese-finetuned-ner](https://huggingface.co/gyr66/RoBERTa-ext-large-chinese-finetuned-ner) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.9586
+ - Precision: 0.7016
+ - Recall: 0.7518
+ - F1: 0.7258
+ - Accuracy: 0.9154
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.001
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 10
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
+ |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
+ | 0.0034 | 1.0 | 252 | 1.0787 | 0.6753 | 0.7523 | 0.7117 | 0.9121 |
+ | 0.0032 | 2.0 | 504 | 1.0376 | 0.6830 | 0.7490 | 0.7145 | 0.9141 |
+ | 0.0018 | 3.0 | 756 | 1.0547 | 0.6731 | 0.7573 | 0.7127 | 0.9126 |
+ | 0.0032 | 4.0 | 1008 | 1.0262 | 0.6829 | 0.7384 | 0.7096 | 0.9126 |
+ | 0.0027 | 5.0 | 1260 | 0.9613 | 0.6898 | 0.7445 | 0.7161 | 0.9118 |
+ | 0.0027 | 6.0 | 1512 | 0.9481 | 0.6780 | 0.7550 | 0.7145 | 0.9120 |
+ | 0.0019 | 7.0 | 1764 | 0.9328 | 0.6917 | 0.7513 | 0.7203 | 0.9150 |
+ | 0.0008 | 8.0 | 2016 | 0.9570 | 0.6976 | 0.7520 | 0.7238 | 0.9143 |
+ | 0.0005 | 9.0 | 2268 | 0.9586 | 0.7016 | 0.7518 | 0.7258 | 0.9154 |
+ | 0.0003 | 10.0 | 2520 | 0.9565 | 0.6945 | 0.7520 | 0.7221 | 0.9151 |
+
+
+ ### Framework versions
+
+ - Transformers 4.35.2
+ - Pytorch 2.1.0+cu121
+ - Datasets 2.16.1
+ - Tokenizers 0.15.0
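
For anyone trying to reproduce the run, the hyperparameters in the README map onto `transformers.TrainingArguments` roughly as below. This is a minimal sketch: the output directory and the per-epoch evaluation strategy are assumptions (the latter consistent with the results table), and the card does not specify the dataset or data collator.

```python
# Sketch of TrainingArguments matching the hyperparameters listed above.
# Numeric values come from the model card; everything else is assumed.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="RoBERTa-ext-large-lora-updated-chinese-finetuned-ner",  # assumed
    learning_rate=1e-3,             # learning_rate: 0.001
    per_device_train_batch_size=8,  # train_batch_size: 8
    per_device_eval_batch_size=8,   # eval_batch_size: 8
    seed=42,                        # seed: 42
    adam_beta1=0.9,                 # optimizer: Adam with betas=(0.9, 0.999)
    adam_beta2=0.999,
    adam_epsilon=1e-8,              # ... and epsilon=1e-08
    lr_scheduler_type="linear",     # lr_scheduler_type: linear
    num_train_epochs=10,            # num_epochs: 10
    evaluation_strategy="epoch",    # assumed: the table reports one eval per epoch
)
```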
adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "gyr66/RoBERTa-ext-large-chinese-finetuned-ner",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 16,
+   "lora_dropout": 0.1,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 4,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "value",
+     "query"
+   ],
+   "task_type": "TOKEN_CLS"
+ }
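
The config above is a standard PEFT LoRA recipe: rank-4 adapters with `lora_alpha=16` and dropout 0.1 on the attention `query` and `value` projections, trained for token classification. A minimal loading sketch, assuming the `peft` package and this repo's id; note that `PeftModel.from_pretrained` reads `adapter_config.json` itself, so the explicit `LoraConfig` is shown only to mirror the JSON:

```python
# Sketch: the LoraConfig equivalent to adapter_config.json, plus adapter loading.
from peft import LoraConfig, PeftModel, TaskType
from transformers import AutoModelForTokenClassification

lora_config = LoraConfig(              # for reference; not needed at load time
    task_type=TaskType.TOKEN_CLS,      # "task_type": "TOKEN_CLS"
    r=4,                               # "r": 4
    lora_alpha=16,                     # "lora_alpha": 16
    lora_dropout=0.1,                  # "lora_dropout": 0.1
    target_modules=["value", "query"], # attention projections only
    bias="none",                       # "bias": "none"
)

# Base weights come from base_model_name_or_path; adapter weights from this repo.
base = AutoModelForTokenClassification.from_pretrained(
    "gyr66/RoBERTa-ext-large-chinese-finetuned-ner"
)
model = PeftModel.from_pretrained(
    base, "gyr66/RoBERTa-ext-large-lora-updated-chinese-finetuned-ner"
)
model.eval()
```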
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:022c9f6e20322b250de8f15e86b738f10a90e728abbfb51af2012572e851544a
+ size 1705460
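
This is a Git LFS pointer rather than the weights themselves: `oid` is the SHA-256 digest of the actual file and `size` is its byte count. A small sketch for verifying a downloaded copy against the pointer (the local path is an assumption):

```python
# Verify a downloaded adapter_model.safetensors against the LFS pointer above.
import hashlib
from pathlib import Path

data = Path("adapter_model.safetensors").read_bytes()  # assumed local path
assert len(data) == 1705460, "size does not match the LFS pointer"
digest = hashlib.sha256(data).hexdigest()
assert digest == "022c9f6e20322b250de8f15e86b738f10a90e728abbfb51af2012572e851544a"
```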
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,60 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "101": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "102": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "103": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_lower_case": true,
+   "ignore_mismatched_sizes": true,
+   "mask_token": "[MASK]",
+   "max_length": 512,
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "stride": 0,
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizer",
+   "truncation_side": "right",
+   "truncation_strategy": "longest_first",
+   "unk_token": "[UNK]"
+ }
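
Note `"tokenizer_class": "BertTokenizer"`: like other Chinese "RoBERTa-ext" checkpoints, this model ships a BERT-style WordPiece vocabulary with `tokenize_chinese_chars` enabled, not a byte-level RoBERTa tokenizer. A minimal usage sketch (the example sentence is illustrative):

```python
# Load the tokenizer files committed above and tokenize a Chinese sentence.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "gyr66/RoBERTa-ext-large-lora-updated-chinese-finetuned-ner"
)
enc = tokenizer("王小明在北京上班。", return_tensors="pt",
                truncation=True, max_length=512)
# With tokenize_chinese_chars=true, expect roughly one token per character,
# wrapped in [CLS] ... [SEP].
print(tokenizer.convert_ids_to_tokens(enc["input_ids"][0]))
```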
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:44f0c6517e2cae15e57364895f855385a5dbaa8224d3ffb57e1fa098272dd6ee
+ size 4600
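
`training_args.bin` is the pickled `TrainingArguments` object that `Trainer` saves alongside its outputs; it can be inspected after download. A sketch, assuming a trusted local copy (unpickling executes code, and newer PyTorch versions may require passing `weights_only=False`):

```python
# Inspect the saved TrainingArguments (a pickle; only load files you trust).
import torch

args = torch.load("training_args.bin")  # assumed local path
print(args.learning_rate, args.num_train_epochs, args.seed)
```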
vocab.txt ADDED
The diff for this file is too large to render. See raw diff