JiyangZhang committed
Commit f8beda1
Parent: 314dad7

Add exLong no eTest name

Files changed (3):
  1. adapter_config.json +5 -5
  2. adapter_model.bin +1 -1
  3. logs.txt +25 -25
adapter_config.json CHANGED
@@ -20,13 +20,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
- "down_proj",
- "k_proj",
   "o_proj",
- "v_proj",
+ "k_proj",
   "gate_proj",
- "q_proj",
- "up_proj"
+ "v_proj",
+ "up_proj",
+ "down_proj",
+ "q_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b36a6bbf3e5cc8083c7f27bb292a084cf36135f6c40b268a40a0d4d5818fde3c
+ oid sha256:783fc5d060218d483d88b91733f99c6da0383ee829f013d2c7e54399af2414c8
  size 319977674
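adapter_model.bin is tracked with Git LFS, so the diff above covers only the pointer file: the oid line is the SHA-256 of the actual ~320 MB payload, which changed while the size stayed identical. A small sketch (the local file path is assumed) for verifying a downloaded copy against the new pointer:

```python
# Sketch: verify a downloaded adapter_model.bin against the sha256 oid
# recorded in the Git LFS pointer above.
import hashlib

EXPECTED = "783fc5d060218d483d88b91733f99c6da0383ee829f013d2c7e54399af2414c8"

h = hashlib.sha256()
with open("adapter_model.bin", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        h.update(chunk)
assert h.hexdigest() == EXPECTED, "checksum mismatch"
```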
logs.txt CHANGED
@@ -5,7 +5,7 @@ tags:
  - generated_from_trainer
  library_name: peft
  model-index:
- - name: work/10283/sarella/ls6/exlong-internal/_work/exp/conditionnestack2e-with-name-ft/lora-codellama-7b-123
+ - name: work/10283/sarella/ls6/exlong-internal/_work/exp/conditionnestack2e-no-name-ft/lora-codellama-7b-123
   results: []
  ---
 
@@ -23,7 +23,7 @@ base_model_config: codellama/CodeLlama-7b-Instruct-hf
  bf16: true
  dataset_prepared_path: null
  datasets:
- - path: /work/10283/sarella/ls6/exlong-internal/_work/setup/conditionnestack2e-with-name-ft/train/train/train-conditionnestack2e-with-name-ft.jsonl
+ - path: /work/10283/sarella/ls6/exlong-internal/_work/setup/conditionnestack2e-no-name-ft/train/train/train-conditionnestack2e-no-name-ft.jsonl
   type:
     field_input: input
     field_instruction: instruction
@@ -63,7 +63,7 @@ micro_batch_size: 4
  model_type: LlamaForCausalLM
  num_epochs: 3
  optimizer: adamw_bnb_8bit
- output_dir: /work/10283/sarella/ls6/exlong-internal/_work/exp/conditionnestack2e-with-name-ft/lora-codellama-7b-123
+ output_dir: /work/10283/sarella/ls6/exlong-internal/_work/exp/conditionnestack2e-no-name-ft/lora-codellama-7b-123
  pad_to_sequence_len: true
  resume_from_checkpoint: null
  sample_packing: true
@@ -92,11 +92,11 @@ xformers_attention: null
 
  </details><br>
 
- # work/10283/sarella/ls6/exlong-internal/_work/exp/conditionnestack2e-with-name-ft/lora-codellama-7b-123
+ # work/10283/sarella/ls6/exlong-internal/_work/exp/conditionnestack2e-no-name-ft/lora-codellama-7b-123
 
  This model is a fine-tuned version of [codellama/CodeLlama-7b-Instruct-hf](https://huggingface.co/codellama/CodeLlama-7b-Instruct-hf) on the None dataset.
  It achieves the following results on the evaluation set:
- - Loss: 0.2909
+ - Loss: 0.4931
 
  ## Model description
 
@@ -130,26 +130,26 @@ The following hyperparameters were used during training:
 
  | Training Loss | Epoch | Step | Validation Loss |
  |:-------------:|:-----:|:----:|:---------------:|
- | 0.631 | 0.01 | 1 | 0.7976 |
- | 0.2584 | 0.16 | 20 | 0.3000 |
- | 0.2045 | 0.31 | 40 | 0.2802 |
- | 0.1596 | 0.47 | 60 | 0.2725 |
- | 0.2024 | 0.62 | 80 | 0.2715 |
- | 0.1613 | 0.78 | 100 | 0.2638 |
- | 0.1341 | 0.93 | 120 | 0.2638 |
- | 0.1295 | 1.07 | 140 | 0.2672 |
- | 0.122 | 1.22 | 160 | 0.2694 |
- | 0.1247 | 1.38 | 180 | 0.2711 |
- | 0.1344 | 1.53 | 200 | 0.2733 |
- | 0.1329 | 1.69 | 220 | 0.2796 |
- | 0.1095 | 1.85 | 240 | 0.2731 |
- | 0.1167 | 2.0 | 260 | 0.2751 |
- | 0.0929 | 2.14 | 280 | 0.2849 |
- | 0.1112 | 2.3 | 300 | 0.2927 |
- | 0.1132 | 2.45 | 320 | 0.2856 |
- | 0.0876 | 2.61 | 340 | 0.2915 |
- | 0.0982 | 2.76 | 360 | 0.2894 |
- | 0.1042 | 2.92 | 380 | 0.2909 |
+ | 0.8379 | 0.01 | 1 | 1.0354 |
+ | 0.3779 | 0.16 | 20 | 0.4820 |
+ | 0.3361 | 0.31 | 40 | 0.4560 |
+ | 0.3153 | 0.47 | 60 | 0.4467 |
+ | 0.2735 | 0.63 | 80 | 0.4457 |
+ | 0.2437 | 0.78 | 100 | 0.4400 |
+ | 0.2941 | 0.94 | 120 | 0.4416 |
+ | 0.2153 | 1.08 | 140 | 0.4466 |
+ | 0.2583 | 1.23 | 160 | 0.4499 |
+ | 0.2026 | 1.39 | 180 | 0.4540 |
+ | 0.185 | 1.55 | 200 | 0.4541 |
+ | 0.2296 | 1.7 | 220 | 0.4604 |
+ | 0.2059 | 1.86 | 240 | 0.4591 |
+ | 0.1998 | 2.02 | 260 | 0.4626 |
+ | 0.1879 | 2.15 | 280 | 0.4828 |
+ | 0.1861 | 2.31 | 300 | 0.4944 |
+ | 0.1561 | 2.47 | 320 | 0.4947 |
+ | 0.1888 | 2.62 | 340 | 0.4939 |
+ | 0.1665 | 2.78 | 360 | 0.4945 |
+ | 0.1627 | 2.94 | 380 | 0.4931 |
 
 
  ### Framework versions
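For reference, the axolotl `datasets` mapping in the config above (`field_instruction: instruction`, `field_input: input`) implies that each line of the training JSONL is an object carrying those keys. A hedged illustration of one record follows; the `output` key and all string values are invented placeholders, not actual exLong training data:

```python
# Hedged illustration of one line of the train-*.jsonl consumed by the
# axolotl config above. Keys follow the field_instruction/field_input
# mapping; "output" and every value here are assumptions, not real data.
import json

record = {
    "instruction": "Generate a test that triggers the expected exception.",
    "input": "<method under test, guard condition, and stack trace>",
    "output": "<generated exceptional-behavior test>",
}
print(json.dumps(record))
```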