Commit e81ed71 (verified) · Siqi-Hu committed · Parent: 622713e

Upload 7 files
README.md ADDED
@@ -0,0 +1,86 @@
---
library_name: peft
license: llama2
base_model: meta-llama/Llama-2-7b-hf
tags:
- generated_from_trainer
model-index:
- name: Llama2-7B-lora-r-32-generic-step-1200-lr-1e-5-labels_40.0-3
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# Llama2-7B-lora-r-32-generic-step-1200-lr-1e-5-labels_40.0-3

This model is a fine-tuned version of [meta-llama/Llama-2-7b-hf](https://huggingface.co/meta-llama/Llama-2-7b-hf) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 2.8089

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 1e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 64
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: cosine
- lr_scheduler_warmup_steps: 50
- training_steps: 1200
- mixed_precision_training: Native AMP

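As a rough illustration, the listed values map onto a Hugging Face `TrainingArguments` setup along the following lines. Only the values in the bullet list above come from this card; the output directory and any arguments not listed there are assumptions.

```python
from transformers import TrainingArguments

# Sketch reconstructing the hyperparameters listed above.
# Adam with betas=(0.9, 0.999) and epsilon=1e-08 is the Trainer default
# optimizer, so it needs no explicit setting here.
training_args = TrainingArguments(
    output_dir="Llama2-7B-lora-r-32-generic-step-1200-lr-1e-5-labels_40.0-3",  # assumed
    learning_rate=1e-5,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    seed=42,
    gradient_accumulation_steps=4,  # 16 x 4 = 64 total train batch size
    lr_scheduler_type="cosine",
    warmup_steps=50,
    max_steps=1200,                 # training_steps: 1200
    fp16=True,                      # "Native AMP" mixed precision
)
```
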
### Training results

| Training Loss | Epoch   | Step | Validation Loss |
|:-------------:|:-------:|:----:|:---------------:|
| 5.4776        | 0.9132  | 50   | 4.8849          |
| 4.2584        | 1.8265  | 100  | 3.8766          |
| 3.674         | 2.7397  | 150  | 3.5260          |
| 3.3895        | 3.6530  | 200  | 3.3094          |
| 3.2045        | 4.5662  | 250  | 3.1682          |
| 3.0743        | 5.4795  | 300  | 3.0658          |
| 2.944         | 6.3927  | 350  | 2.9913          |
| 2.8487        | 7.3059  | 400  | 2.9373          |
| 2.7773        | 8.2192  | 450  | 2.8989          |
| 2.7123        | 9.1324  | 500  | 2.8699          |
| 2.6538        | 10.0457 | 550  | 2.8497          |
| 2.6145        | 10.9589 | 600  | 2.8358          |
| 2.5768        | 11.8721 | 650  | 2.8264          |
| 2.5979        | 12.7854 | 700  | 2.8210          |
| 2.5595        | 13.6986 | 750  | 2.8141          |
| 2.5285        | 14.6119 | 800  | 2.8104          |
| 2.5112        | 15.5251 | 850  | 2.8109          |
| 2.4877        | 16.4384 | 900  | 2.8095          |
| 2.4536        | 17.3516 | 950  | 2.8070          |
| 2.5073        | 18.2648 | 1000 | 2.8081          |
| 2.4321        | 19.1781 | 1050 | 2.8069          |
| 2.4284        | 20.0913 | 1100 | 2.8085          |
| 2.4627        | 21.0046 | 1150 | 2.8088          |
| 2.4636        | 21.9178 | 1200 | 2.8089          |


### Framework versions

- PEFT 0.15.2
- Transformers 4.45.2
- Pytorch 2.5.0+cu121
- Datasets 3.2.0
- Tokenizers 0.20.3
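
Since the card's usage section is still empty, here is a minimal loading sketch, assuming this repository hosts the LoRA adapter produced by the run above. The adapter repo id below is inferred from the commit author and model name, so treat it as an assumption.

```python
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

BASE = "meta-llama/Llama-2-7b-hf"
ADAPTER = "Siqi-Hu/Llama2-7B-lora-r-32-generic-step-1200-lr-1e-5-labels_40.0-3"  # assumed repo id

tokenizer = AutoTokenizer.from_pretrained(BASE)
base_model = AutoModelForCausalLM.from_pretrained(
    BASE, torch_dtype=torch.float16, device_map="auto"
)
# Attach the LoRA adapter weights on top of the frozen base model.
model = PeftModel.from_pretrained(base_model, ADAPTER)

inputs = tokenizer("The capital of France is", return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```
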
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": "</s>",
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
size 499723
tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
{
  "add_bos_token": true,
  "add_eos_token": false,
  "add_prefix_space": null,
  "added_tokens_decoder": {
    "0": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": false,
  "eos_token": "</s>",
  "legacy": false,
  "max_length": 512,
  "model_max_length": 1000000000000000019884624838656,
  "pad_token": "</s>",
  "padding_side": "right",
  "sp_model_kwargs": {},
  "tokenizer_class": "LlamaTokenizer",
  "truncation": true,
  "unk_token": "<unk>",
  "use_default_system_prompt": false
}
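
Two details of these tokenizer files are worth noting: `pad_token` reuses `</s>` (the Llama-2 tokenizer ships no dedicated pad token), and right-side padding with truncation at `max_length: 512` is baked into the config. A short sketch of what that means in practice, assuming the files from this commit sit in the current directory:

```python
from transformers import AutoTokenizer

# Load the tokenizer shipped in this commit (local path is an assumption).
tok = AutoTokenizer.from_pretrained(".")

print(tok.pad_token, tok.pad_token_id)  # "</s>", id 2 -- EOS doubles as padding

batch = tok(
    ["a short prompt", "a second, somewhat longer prompt"],
    padding=True,      # pads on the right, per padding_side in the config
    truncation=True,
    max_length=512,
    return_tensors="pt",
)
print(batch["input_ids"].shape)  # (2, length of the longest sequence)
```
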
training_args.bin ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1d76dda35e60cb1c28c320e4f6a844daa8a929a31ecf5ac9fde8508cb6d8eb69
size 5560
training_metrics.json ADDED
@@ -0,0 +1,158 @@
{
  "train_loss": [
    5.4776, 4.2584, 3.674, 3.3895, 3.2045, 3.0743, 2.944, 2.8487,
    2.7773, 2.7123, 2.6538, 2.6145, 2.5768, 2.5979, 2.5595, 2.5285,
    2.5112, 2.4877, 2.4536, 2.5073, 2.4321, 2.4284, 2.4627, 2.4636
  ],
  "train_steps": [
    50, 100, 150, 200, 250, 300, 350, 400, 450, 500, 550, 600,
    650, 700, 750, 800, 850, 900, 950, 1000, 1050, 1100, 1150, 1200
  ],
  "train_epochs": [
    0.91324200913242, 1.82648401826484, 2.73972602739726, 3.65296803652968,
    4.566210045662101, 5.47945205479452, 6.392694063926941, 7.30593607305936,
    8.219178082191782, 9.132420091324201, 10.045662100456621, 10.95890410958904,
    11.872146118721462, 12.785388127853881, 13.698630136986301, 14.61187214611872,
    15.525114155251142, 16.438356164383563, 17.35159817351598, 18.264840182648403,
    19.17808219178082, 20.091324200913242, 21.004566210045663, 21.91780821917808
  ],
  "eval_loss": [
    4.884899139404297, 3.8766419887542725, 3.5259857177734375, 3.3093771934509277,
    3.168154239654541, 3.06583571434021, 2.991332769393921, 2.9372518062591553,
    2.8988661766052246, 2.8699264526367188, 2.8496575355529785, 2.835752010345459,
    2.826432466506958, 2.821038007736206, 2.814145565032959, 2.8103811740875244,
    2.8109121322631836, 2.8094851970672607, 2.8070461750030518, 2.808090925216675,
    2.806938886642456, 2.808488607406616, 2.808797597885132, 2.808856248855591
  ],
  "eval_steps": [
    50, 100, 150, 200, 250, 300, 350, 400, 450, 500, 550, 600,
    650, 700, 750, 800, 850, 900, 950, 1000, 1050, 1100, 1150, 1200
  ],
  "eval_epochs": [
    0.91324200913242, 1.82648401826484, 2.73972602739726, 3.65296803652968,
    4.566210045662101, 5.47945205479452, 6.392694063926941, 7.30593607305936,
    8.219178082191782, 9.132420091324201, 10.045662100456621, 10.95890410958904,
    11.872146118721462, 12.785388127853881, 13.698630136986301, 14.61187214611872,
    15.525114155251142, 16.438356164383563, 17.35159817351598, 18.264840182648403,
    19.17808219178082, 20.091324200913242, 21.004566210045663, 21.91780821917808
  ]
}
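
The arrays in `training_metrics.json` mirror the results table in the README; a quick plotting sketch (the file path and matplotlib usage are illustrative, not part of this commit):

```python
import json

import matplotlib.pyplot as plt

# Plot the loss curves stored in training_metrics.json (local path assumed).
with open("training_metrics.json") as f:
    metrics = json.load(f)

plt.plot(metrics["train_steps"], metrics["train_loss"], label="training loss")
plt.plot(metrics["eval_steps"], metrics["eval_loss"], label="validation loss")
plt.xlabel("step")
plt.ylabel("loss")
plt.legend()
plt.show()
```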