Tehniyat committed
Commit
440e768
1 Parent(s): 6f240b1

Upload model

Files changed (3):
  1. README.md (+21, -0)
  2. adapter_config.json (+6, -2)
  3. adapter_model.bin (+1, -1)
README.md ADDED
@@ -0,0 +1,21 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: True
+- load_in_4bit: False
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: fp4
+- bnb_4bit_use_double_quant: False
+- bnb_4bit_compute_dtype: float32
+### Framework versions
+
+
+- PEFT 0.6.0.dev0
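Each README entry maps one-to-one onto a `BitsAndBytesConfig` argument in `transformers`. As a minimal sketch, assuming the base model recorded in `adapter_config.json` below and a `device_map="auto"` placement (neither is stated in the README itself), the same quantization setup looks like:

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Mirrors the quantization config recorded in the README above.
bnb_config = BitsAndBytesConfig(
    load_in_8bit=True,
    load_in_4bit=False,
    llm_int8_threshold=6.0,
    llm_int8_skip_modules=None,
    llm_int8_enable_fp32_cpu_offload=False,
    llm_int8_has_fp16_weight=False,
    bnb_4bit_quant_type="fp4",
    bnb_4bit_use_double_quant=False,
    bnb_4bit_compute_dtype=torch.float32,
)

# Base model from adapter_config.json; device_map="auto" is an assumption.
model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-6.7b",
    quantization_config=bnb_config,
    device_map="auto",
)
```

Note that the `bnb_4bit_*` fields sit at their defaults here because `load_in_4bit` is `False`; only the 8-bit path is active in this config.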
adapter_config.json CHANGED
@@ -1,14 +1,18 @@
 {
+  "auto_mapping": null,
   "base_model_name_or_path": "facebook/opt-6.7b",
   "bias": "none",
-  "enable_lora": null,
   "fan_in_fan_out": false,
   "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
   "lora_alpha": 32,
   "lora_dropout": 0.05,
-  "merge_weights": false,
+  "modules_to_save": null,
   "peft_type": "LORA",
   "r": 16,
+  "revision": null,
   "target_modules": [
     "q_proj",
     "v_proj"
adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:75eda1c62cb8fc1a897d7b810379a3b41b66223043d5f38e923627312f20e344
+oid sha256:748a2766c46fbff00efa0064afbe63349b94b23c5d933cc1ff8b5c610222b615
 size 33601485
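Only the Git LFS pointer changes here: same file size, new SHA-256, i.e. freshly trained adapter weights. A minimal loading sketch, assuming the hypothetical repository ID `Tehniyat/opt-6.7b-lora` (the actual repo name is not shown on this page) and the quantized base model from the first sketch:

```python
from peft import PeftModel

# "model" is the 8-bit base model from the BitsAndBytesConfig sketch above.
# The adapter repo ID below is hypothetical; substitute the real one.
lora_model = PeftModel.from_pretrained(model, "Tehniyat/opt-6.7b-lora")
lora_model.eval()
```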