Commit f740f46 by vpkrishna
1 Parent(s): c06915a

llm/llama38binstruct-summary-100s

README.md CHANGED
@@ -20,7 +20,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [NousResearch/Meta-Llama-3-8B-Instruct](https://huggingface.co/NousResearch/Meta-Llama-3-8B-Instruct) on the generator dataset.
 It achieves the following results on the evaluation set:
-- Loss: 2.3170
+- Loss: 3.2478
 
 ## Model description
 
@@ -39,23 +39,33 @@ More information needed
 ### Training hyperparameters
 
 The following hyperparameters were used during training:
-- learning_rate: 0.0002
-- train_batch_size: 1
+- learning_rate: 0.0001
+- train_batch_size: 2
 - eval_batch_size: 8
 - seed: 42
+- gradient_accumulation_steps: 4
+- total_train_batch_size: 8
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: constant
-- lr_scheduler_warmup_steps: 0.03
-- training_steps: 100
+- lr_scheduler_warmup_steps: 10
+- training_steps: 300
 
 ### Training results
 
-| Training Loss | Epoch  | Step | Validation Loss |
-|:-------------:|:------:|:----:|:---------------:|
-| 1.4106        | 1.4706 | 25   | 1.8186          |
-| 0.6403        | 2.9412 | 50   | 1.8366          |
-| 0.199         | 4.4118 | 75   | 2.3530          |
-| 0.0682        | 5.8824 | 100  | 2.3170          |
+| Training Loss | Epoch | Step | Validation Loss |
+|:-------------:|:-----:|:----:|:---------------:|
+| 0.0063        | 10.0  | 25   | 2.9544          |
+| 0.0033        | 20.0  | 50   | 3.1133          |
+| 0.0057        | 30.0  | 75   | 2.5821          |
+| 0.0032        | 40.0  | 100  | 2.9857          |
+| 0.0021        | 50.0  | 125  | 3.1502          |
+| 0.0019        | 60.0  | 150  | 3.0546          |
+| 0.0026        | 70.0  | 175  | 2.7894          |
+| 0.0045        | 80.0  | 200  | 2.6616          |
+| 0.0014        | 90.0  | 225  | 3.1916          |
+| 0.0009        | 100.0 | 250  | 3.2146          |
+| 0.0007        | 110.0 | 275  | 3.2346          |
+| 0.0006        | 120.0 | 300  | 3.2478          |
 
 
 ### Framework versions
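
The updated hyperparameters imply an effective batch size of train_batch_size × gradient_accumulation_steps = 2 × 4 = 8, which matches the reported total_train_batch_size. As a minimal sketch, here is how the post-commit values would map onto `transformers.TrainingArguments` — the card does not say which trainer produced them, so the field mapping and `output_dir` are assumptions:

```python
from transformers import TrainingArguments

# Sketch only: the post-commit hyperparameters from the README hunk above,
# expressed as TrainingArguments. output_dir is a placeholder.
args = TrainingArguments(
    output_dir="llama38binstruct-summary-100s",  # hypothetical
    learning_rate=1e-4,                 # was 2e-4 before this commit
    per_device_train_batch_size=2,      # was 1
    per_device_eval_batch_size=8,
    gradient_accumulation_steps=4,      # effective batch: 2 * 4 = 8
    seed=42,
    optim="adamw_torch",                # Adam, betas=(0.9, 0.999), eps=1e-8
    lr_scheduler_type="constant",
    warmup_steps=10,                    # was 0.03 before this commit
    max_steps=300,                      # was 100
)
```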
adapter_config.json CHANGED
@@ -11,7 +11,7 @@
   "layers_to_transform": null,
   "loftq_config": {},
   "lora_alpha": 32,
-  "lora_dropout": 0.1,
+  "lora_dropout": 0.15,
   "megatron_config": null,
   "megatron_core": "megatron.core",
   "modules_to_save": null,
@@ -20,13 +20,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "down_proj",
-    "k_proj",
-    "o_proj",
-    "up_proj",
+    "v_proj",
     "gate_proj",
+    "up_proj",
     "q_proj",
-    "v_proj"
+    "down_proj",
+    "o_proj",
+    "k_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7e094891c1aee48fb0463f399bd76f5e390efb1534f3111cff7517fbe899a422
+oid sha256:ef728538a619ab3e4f2aa024958451bbd939b654cde566a8a811535264e05487
 size 167832240
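
The new pointer replaces the adapter weights in place (same 167832240-byte size, new content hash). A sketch of how such a LoRA adapter is typically attached to its base model with peft — the adapter repo id here is a guess based on the commit message, not something the diff confirms:

```python
from peft import PeftModel
from transformers import AutoModelForCausalLM

# Sketch: attach the LoRA adapter in adapter_model.safetensors to the
# base model named in the README. The adapter repo id is hypothetical.
base = AutoModelForCausalLM.from_pretrained("NousResearch/Meta-Llama-3-8B-Instruct")
model = PeftModel.from_pretrained(base, "vpkrishna/llama38binstruct-summary-100s")
```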
runs/Jun19_02-06-52_0113f146e29c/events.out.tfevents.1718762822.0113f146e29c.1122.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:094df86ed11868930002e1c74849f41f0d70840b82c458b1532a133e949ac51c
+size 17092
runs/Jun19_02-15-31_0113f146e29c/events.out.tfevents.1718763345.0113f146e29c.1122.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6b53dd431bcb936ce2afafe7e31be680510d4fcc461ee0a43e5dda7179f86f6b
+size 6851
runs/Jun19_02-30-55_0113f146e29c/events.out.tfevents.1718764264.0113f146e29c.1122.2 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a4f7b77901cc418cfa4a0cda79380d198fe7eaeeb52cb3bc204e0cb06aee6bda
+size 5758
runs/Jun19_02-31-44_0113f146e29c/events.out.tfevents.1718764313.0113f146e29c.1122.3 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2da19f81b750f56cfd1c1d4ca7f110582d7bbf59dcc4c328692e1838399cc3e4
+size 5756
runs/Jun19_02-32-38_0113f146e29c/events.out.tfevents.1718764370.0113f146e29c.1122.4 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f3e209dd08b935cacb92d4c27125ab2abbfe957487923be4af4e34122d356603
+size 6171
runs/Jun19_02-40-57_0113f146e29c/events.out.tfevents.1718764865.0113f146e29c.1122.5 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eda9984f2289eeb32a8de11a244dd3bc1288c502ce42de818fbf429f8370f5b2
+size 15618
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:61b7ada42c60fe4d82f8a88aa00fcd8b1993f7163e532eacabdf48ed40fe4578
-size 5368
+oid sha256:da7ab063a3e2ac331c4fbea43e632b2ee7ef1029442c59327726294ac03a99a1
+size 5432
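
training_args.bin is the pickled `TrainingArguments` object the Hugging Face Trainer saves alongside checkpoints; its new hash and size reflect the updated run configuration. A sketch for inspecting it from a local clone — `weights_only=False` is needed on recent PyTorch because the file is an arbitrary pickle, so only unpickle files you trust:

```python
import torch

# Sketch: inspect the saved TrainingArguments from a local clone.
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.max_steps, args.lr_scheduler_type)
```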