aengusl committed
Commit
d8c9c57
1 Parent(s): ce53e88

Model save

README.md CHANGED
@@ -16,7 +16,7 @@ model-index:
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
  should probably proofread and complete it, then remove this comment. -->

- [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/quirky_lats_at_mats/harmbench_adv_training/runs/6vdb30ky)
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/quirky_lats_at_mats/harmbench_adv_training/runs/vje92zdz)
  # llama2-7b-sft-lora

  This model is a fine-tuned version of [meta-llama/Llama-2-7b-chat-hf](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf) on the generator dataset.
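For reference, the README above describes this repo as a LoRA fine-tune of meta-llama/Llama-2-7b-chat-hf, so the saved adapter would normally be attached to that base model with the `peft` library. The sketch below is illustrative only and is not part of this commit; the adapter repo id is a placeholder, not a confirmed Hub path.

```python
# Minimal sketch (not part of this commit): attach the saved LoRA adapter to the
# base model named in README.md. ADAPTER_ID is a placeholder; substitute the
# actual Hub path of this repository.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

BASE_ID = "meta-llama/Llama-2-7b-chat-hf"   # base model from the model card
ADAPTER_ID = "<this-repo-id>"               # hypothetical; use the real repo path

tokenizer = AutoTokenizer.from_pretrained(BASE_ID)
base_model = AutoModelForCausalLM.from_pretrained(BASE_ID)
model = PeftModel.from_pretrained(base_model, ADAPTER_ID)  # loads adapter_config.json + adapter weights
```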
adapter_config.json CHANGED
@@ -17,11 +17,11 @@
  "revision": null,
  "target_modules": [
  "q_proj",
- "down_proj",
+ "o_proj",
  "k_proj",
  "v_proj",
- "o_proj",
- "up_proj"
+ "up_proj",
+ "down_proj"
  ],
  "task_type": "CAUSAL_LM"
  }
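The adapter_config.json change above only reorders and regroups the LoRA target_modules; the same set of attention and MLP projections is targeted before and after. A minimal sketch of a `peft` LoraConfig that would serialize to the updated list follows; the rank, alpha, and dropout values are illustrative assumptions and are not recorded in this diff.

```python
# Illustrative only: a LoraConfig matching the updated target_modules and
# task_type in adapter_config.json. r, lora_alpha, and lora_dropout are
# assumed values, not taken from this commit.
from peft import LoraConfig

lora_config = LoraConfig(
    r=16,               # assumed LoRA rank
    lora_alpha=32,      # assumed scaling factor
    lora_dropout=0.05,  # assumed dropout
    target_modules=[
        "q_proj", "o_proj", "k_proj", "v_proj",  # attention projections
        "up_proj", "down_proj",                  # MLP projections
    ],
    task_type="CAUSAL_LM",  # matches task_type in adapter_config.json
)
```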
runs/Jul05_18-13-58_a3efa988be12/events.out.tfevents.1720203478.a3efa988be12.4220.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:70cd1d8a7a09de3707547f24a30ec66a14b871e4e6508c45281fc34de0f678f1
+ size 5080
step_0/adapter_config.json CHANGED
@@ -17,11 +17,11 @@
  "revision": null,
  "target_modules": [
  "q_proj",
- "down_proj",
+ "o_proj",
  "k_proj",
  "v_proj",
- "o_proj",
- "up_proj"
+ "up_proj",
+ "down_proj"
  ],
  "task_type": "CAUSAL_LM"
  }
step_0/training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:140178cd9179975593c70e512f6cadc0746f317c7aa23cadbdbb19adb2a4af40
+ oid sha256:0d3a406bfc67294221e1a1bf0ff6ff1c2d0f224d7e42e759829dc9dcd44dbfae
  size 6200
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:140178cd9179975593c70e512f6cadc0746f317c7aa23cadbdbb19adb2a4af40
+ oid sha256:0d3a406bfc67294221e1a1bf0ff6ff1c2d0f224d7e42e759829dc9dcd44dbfae
  size 6200