nhankins committed
Commit 1fa5459
Parent: 16f8b52

nhankins/zh_distilbert_lora_adapter_3.0

README.md ADDED
@@ -0,0 +1,72 @@
+ ---
+ license: apache-2.0
+ base_model: distilbert/distilbert-base-multilingual-cased
+ tags:
+ - generated_from_trainer
+ metrics:
+ - precision
+ - recall
+ model-index:
+ - name: distilbert-base-multilingual-cased-lora-text-classification
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # distilbert-base-multilingual-cased-lora-text-classification
+
+ This model is a fine-tuned version of [distilbert/distilbert-base-multilingual-cased](https://huggingface.co/distilbert/distilbert-base-multilingual-cased) on an unspecified dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.4881
+ - Precision: 0.7966
+ - Recall: 0.9216
+ - Accuracy: 0.7606
+ - F1: 0.8545
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 1e-05
+ - train_batch_size: 4
+ - eval_batch_size: 4
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 10
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | Accuracy | F1     |
+ |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:--------:|:------:|
+ | No log        | 1.0   | 401  | 0.5429          | 0.7631    | 1.0    | 0.7631   | 0.8656 |
+ | 0.5808        | 2.0   | 802  | 0.5361          | 0.7631    | 1.0    | 0.7631   | 0.8656 |
+ | 0.5805        | 3.0   | 1203 | 0.5235          | 0.7631    | 1.0    | 0.7631   | 0.8656 |
+ | 0.5554        | 4.0   | 1604 | 0.5096          | 0.7669    | 1.0    | 0.7681   | 0.8681 |
+ | 0.5214        | 5.0   | 2005 | 0.5046          | 0.7734    | 0.9706 | 0.7606   | 0.8609 |
+ | 0.5214        | 6.0   | 2406 | 0.4971          | 0.7950    | 0.9379 | 0.7681   | 0.8606 |
+ | 0.5152        | 7.0   | 2807 | 0.4919          | 0.7983    | 0.9183 | 0.7606   | 0.8541 |
+ | 0.4956        | 8.0   | 3208 | 0.4881          | 0.8017    | 0.9118 | 0.7606   | 0.8532 |
+ | 0.4891        | 9.0   | 3609 | 0.4881          | 0.7972    | 0.9248 | 0.7631   | 0.8563 |
+ | 0.5038        | 10.0  | 4010 | 0.4881          | 0.7966    | 0.9216 | 0.7606   | 0.8545 |
+
+
+ ### Framework versions
+
+ - Transformers 4.35.2
+ - Pytorch 2.1.0+cu121
+ - Datasets 2.17.0
+ - Tokenizers 0.15.2
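
The hyperparameter list and per-epoch table above correspond to a standard transformers `Trainer` run. Below is a minimal sketch of that configuration; the output directory, the per-epoch evaluation strategy, and the metric wiring are assumptions inferred from the card (they are not stated explicitly), shown only to make the listed values concrete.

```python
# Minimal sketch of the training setup implied by the hyperparameter list.
# Dataset, model wiring, and output_dir are assumptions; only the values
# echoed in comments come from the card itself.
import numpy as np
import evaluate
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="distilbert-base-multilingual-cased-lora-text-classification",  # assumed
    learning_rate=1e-5,             # learning_rate: 1e-05
    per_device_train_batch_size=4,  # train_batch_size: 4
    per_device_eval_batch_size=4,   # eval_batch_size: 4
    seed=42,                        # seed: 42
    lr_scheduler_type="linear",     # lr_scheduler_type: linear
    num_train_epochs=10,            # num_epochs: 10
    evaluation_strategy="epoch",    # implied by the per-epoch results table
    # Adam with betas=(0.9, 0.999) and epsilon=1e-08 is the transformers default.
)

# One plausible compute_metrics that would produce the combined
# {'accuracy': ..., 'f1': ...} dict reported in the original results table.
accuracy = evaluate.load("accuracy")
f1 = evaluate.load("f1")

def compute_metrics(eval_pred):
    logits, labels = eval_pred
    preds = np.argmax(logits, axis=-1)
    return {
        "accuracy": accuracy.compute(predictions=preds, references=labels)["accuracy"],
        "f1": f1.compute(predictions=preds, references=labels)["f1"],
    }
```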
adapter_config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "distilbert/distilbert-base-multilingual-cased",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 32,
+   "lora_dropout": 0.01,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 4,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "q_lin",
+     "out_lin",
+     "lin1",
+     "k_lin",
+     "v_lin",
+     "lin2"
+   ],
+   "task_type": "SEQ_CLS",
+   "use_rslora": false
+ }
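
This config applies rank-4 LoRA (alpha 32, dropout 0.01) to every linear projection in DistilBERT's attention and feed-forward blocks (`q_lin`, `k_lin`, `v_lin`, `out_lin`, `lin1`, `lin2`) for a sequence-classification task. A minimal loading sketch with PEFT follows; `num_labels=2` and the sample sentence are assumptions, not stated anywhere in the repo.

```python
# Sketch: attach this LoRA adapter to its base model for inference.
# num_labels=2 is an assumption inferred from the binary-style metrics above.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from peft import PeftModel

base = AutoModelForSequenceClassification.from_pretrained(
    "distilbert/distilbert-base-multilingual-cased", num_labels=2
)
model = PeftModel.from_pretrained(base, "nhankins/zh_distilbert_lora_adapter_3.0")
model.eval()

tokenizer = AutoTokenizer.from_pretrained("nhankins/zh_distilbert_lora_adapter_3.0")
inputs = tokenizer("这是一个例句。", return_tensors="pt")  # "This is an example sentence." (illustrative)
with torch.no_grad():
    probs = model(**inputs).logits.softmax(dim=-1)
print(probs)
```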
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:972c95eba13b916d51795df674130be111fbfb8bfd4f614cb5664b02010140fd
+ size 3706232
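
Note that these three lines are a git-lfs pointer, not the weights themselves: the ~3.7 MB safetensors file is addressed by the sha256 `oid`. If integrity matters, a quick check against the pointer looks like this (hypothetical client code, not part of the repo):

```python
# Fetch the resolved adapter weights and verify them against the
# git-lfs pointer's sha256 oid recorded above.
import hashlib
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="nhankins/zh_distilbert_lora_adapter_3.0",
    filename="adapter_model.safetensors",
)
with open(path, "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()
assert digest == "972c95eba13b916d51795df674130be111fbfb8bfd4f614cb5664b02010140fd"
```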
runs/Feb15_20-29-50_dd85c6ed0450/events.out.tfevents.1708028994.dd85c6ed0450.636.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:df3ce693d2cb4d4d7e9fd169bb60253eb1f6be2daff3e9fc4afe2adab7c33f69
+ size 9791
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,56 @@
+ {
+   "add_prefix_space": true,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "101": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "102": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "103": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_lower_case": false,
+   "mask_token": "[MASK]",
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "DistilBertTokenizer",
+   "unk_token": "[UNK]"
+ }
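
Two details in this config are easy to misread: the enormous `model_max_length` is the transformers sentinel for "no maximum length recorded", so callers should pass an explicit `max_length`; and `tokenize_chinese_chars: true` splits CJK text into single characters before WordPiece lookup, which is what makes this tokenizer workable for the Chinese-focused adapter. A quick illustrative check (the sample text is an assumption):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("nhankins/zh_distilbert_lora_adapter_3.0")

# CJK input is split one character per token before WordPiece lookup,
# per tokenize_chinese_chars=true above. Pass max_length explicitly,
# since model_max_length here is the "unset" sentinel.
enc = tok("这是一个测试。", truncation=True, max_length=512)
print(tok.convert_ids_to_tokens(enc["input_ids"]))
# Expected shape: [CLS], one token per Chinese character, [SEP].
```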
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e31c375ec35784bcc855eb19d00e3b1ef0e615e6a52fd03a341ed5543b029dc0
+ size 4664
vocab.txt ADDED
The diff for this file is too large to render. See raw diff