aengusl committed on
Commit 72e1ef7
1 Parent(s): 1a3d693

Model save

README.md CHANGED
@@ -16,7 +16,7 @@ model-index:
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
  should probably proofread and complete it, then remove this comment. -->
 
- [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/quirky_lats_at_mats/harmbench_adv_training/runs/16agdzmc)
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/quirky_lats_at_mats/harmbench_adv_training/runs/ghnorvon)
  # llama2-7b-sft-lora
 
  This model is a fine-tuned version of [meta-llama/Llama-2-7b-chat-hf](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf) on the generator dataset.
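The updated card still describes a LoRA adapter trained on top of meta-llama/Llama-2-7b-chat-hf. For reference, a minimal sketch of loading an adapter like this with PEFT — the adapter repo id shown is an assumed placeholder based on the card title, not a path confirmed by this commit:

```python
# Minimal sketch of loading a LoRA adapter like llama2-7b-sft-lora onto its base model.
# "aengusl/llama2-7b-sft-lora" is an assumed placeholder; substitute the actual adapter
# repo id or a local checkout of this repository.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "meta-llama/Llama-2-7b-chat-hf"
tokenizer = AutoTokenizer.from_pretrained(base_id)
base_model = AutoModelForCausalLM.from_pretrained(base_id)

# Attach the LoRA weights from the adapter repository on top of the frozen base model.
model = PeftModel.from_pretrained(base_model, "aengusl/llama2-7b-sft-lora")
```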
adapter_config.json CHANGED
@@ -16,11 +16,11 @@
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
- "up_proj",
  "o_proj",
+ "up_proj",
+ "q_proj",
  "down_proj",
  "v_proj",
- "q_proj",
  "k_proj"
  ],
  "task_type": "CAUSAL_LM"
runs/Jun10_17-01-57_493cf1de940b/events.out.tfevents.1718038947.493cf1de940b.18909.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7ffc2ae2102bc475793cfdc7f561ce79350f32330475320e0e75fe3aece8766d
- size 5069
+ oid sha256:e5b447d39998043db3dd2fab3542391754e99f677f78cf0d4026a68b571f1869
+ size 5223
runs/Jun10_17-06-06_493cf1de940b/events.out.tfevents.1718039203.493cf1de940b.20497.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4272af0ba8bd4d0331f7a4382d16f9a84dadafcc6234699e30a44d18aa61f9a6
+ size 5069
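The events.out.tfevents and training_args.bin entries in this commit are Git LFS pointer files: the oid is the SHA-256 of the tracked file's contents and size is its byte length, so both change whenever the underlying binary changes. A small sketch of checking a downloaded file against its pointer (the file path is a placeholder):

```python
# Verify a downloaded LFS object against the oid/size recorded in its pointer file.
# The path below is a placeholder for any of the binaries referenced in this commit.
import hashlib
from pathlib import Path

path = Path("training_args.bin")  # placeholder path
data = path.read_bytes()

print("size:", len(data))                               # should match the pointer's "size"
print("oid sha256:", hashlib.sha256(data).hexdigest())  # should match the pointer's "oid"
```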
step_0/README.md CHANGED
@@ -602,4 +602,11 @@ Carbon emissions can be estimated using the [Machine Learning Impact calculator]
  ### Framework versions
 
 
+ - PEFT 0.6.1
+ ## Training procedure
+
+
+ ### Framework versions
+
+
  - PEFT 0.6.1
step_0/adapter_config.json CHANGED
@@ -16,11 +16,11 @@
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
- "up_proj",
  "o_proj",
+ "up_proj",
+ "q_proj",
  "down_proj",
  "v_proj",
- "q_proj",
  "k_proj"
  ],
  "task_type": "CAUSAL_LM"
step_0/training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3b792b2c100a5467b9311492fcccc782ddf2207bc55f1c7c79818fec68c7f1d2
+ oid sha256:531230acb049f42ed147ac0f434c35a4b169fc9c89780fba33c47299b72c1ca5
  size 6200
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3b792b2c100a5467b9311492fcccc782ddf2207bc55f1c7c79818fec68c7f1d2
+ oid sha256:531230acb049f42ed147ac0f434c35a4b169fc9c89780fba33c47299b72c1ca5
  size 6200