Charishma010997 committed
Commit efdfeaa
1 Parent(s): b3a6b02

Upload 5 files

Files changed (5)
  1. README.md +47 -0
  2. adapter_config.json +20 -0
  3. adapter_model.bin +3 -0
  4. config.json +1 -0
  5. training_args.bin +3 -0
README.md ADDED
@@ -0,0 +1,47 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: True
+ - bnb_4bit_compute_dtype: bfloat16
+
+ ### Framework versions
+
+ - PEFT 0.5.0.dev0
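Taken together, the README records a QLoRA-style setup: the base model is loaded in 4-bit NF4 with double quantization and bfloat16 compute, and a LoRA adapter is trained on top. Below is a minimal sketch of reproducing that load and attaching this adapter with the standard `transformers`/`peft` APIs. The adapter repo id is a placeholder (this commit does not name the repository), and the int8-specific fields are left at their defaults, which match the values listed above.

```python
# Minimal sketch, assuming the standard transformers/peft APIs.
# ADAPTER_REPO is a placeholder -- the actual repo id is not shown in this commit.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import PeftModel

ADAPTER_REPO = "Charishma010997/<adapter-repo>"  # hypothetical placeholder

# Mirrors the bitsandbytes config recorded in the README above.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)

base = AutoModelForCausalLM.from_pretrained(
    "vilsonrodrigues/falcon-7b-instruct-sharded",
    quantization_config=bnb_config,
    trust_remote_code=True,  # the config.json below routes Auto* through custom Falcon code
    device_map="auto",
)
model = PeftModel.from_pretrained(base, ADAPTER_REPO)
```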
adapter_config.json ADDED
@@ -0,0 +1,20 @@
+ {
+   "auto_mapping": null,
+   "base_model_name_or_path": "vilsonrodrigues/falcon-7b-instruct-sharded",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 32,
+   "lora_dropout": 0.05,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 16,
+   "revision": null,
+   "target_modules": [
+     "query_key_value"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
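The fields above map one-to-one onto peft's `LoraConfig`. For reference, this is how the same adapter configuration would be rebuilt in code, with values copied from the JSON and nothing added:

```python
# Rebuilds the LoraConfig recorded in adapter_config.json above.
from peft import LoraConfig

lora_config = LoraConfig(
    r=16,                                # LoRA rank
    lora_alpha=32,                       # scaling; effective scale = alpha / r = 2.0
    lora_dropout=0.05,
    bias="none",
    target_modules=["query_key_value"],  # Falcon's fused QKV projection
    task_type="CAUSAL_LM",
)
```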
adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bcaef40a662eb996c3f2caa07d293a248e5e20f8d328476ab399325563b1b984
+ size 18898161
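This is a Git LFS pointer file, not the weights themselves: the repository stores only the object id and size, while the roughly 18 MB of adapter tensors live in LFS storage. A standard-library sketch for checking a downloaded copy against the pointer:

```python
# Verify a downloaded adapter_model.bin against the LFS pointer above.
import hashlib

EXPECTED_OID = "bcaef40a662eb996c3f2caa07d293a248e5e20f8d328476ab399325563b1b984"
EXPECTED_SIZE = 18898161  # bytes, from the pointer's "size" line

with open("adapter_model.bin", "rb") as f:
    blob = f.read()

assert len(blob) == EXPECTED_SIZE, "size does not match the LFS pointer"
assert hashlib.sha256(blob).hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("adapter_model.bin matches its LFS pointer")
```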
config.json ADDED
@@ -0,0 +1 @@
+ {"vocab_size": 65024, "hidden_size": 4544, "num_hidden_layers": 32, "num_attention_heads": 71, "layer_norm_epsilon": 1e-05, "initializer_range": 0.02, "use_cache": true, "hidden_dropout": 0.0, "attention_dropout": 0.0, "bos_token_id": 11, "eos_token_id": 11, "num_kv_heads": 71, "alibi": false, "new_decoder_architecture": false, "multi_query": true, "parallel_attn": true, "bias": false, "return_dict": true, "output_hidden_states": false, "output_attentions": false, "torchscript": false, "torch_dtype": "bfloat16", "use_bfloat16": false, "tf_legacy_loss": false, "pruned_heads": {}, "tie_word_embeddings": true, "is_encoder_decoder": false, "is_decoder": false, "cross_attention_hidden_size": null, "add_cross_attention": false, "tie_encoder_decoder": false, "max_length": 20, "min_length": 0, "do_sample": false, "early_stopping": false, "num_beams": 1, "num_beam_groups": 1, "diversity_penalty": 0.0, "temperature": 1.0, "top_k": 50, "top_p": 1.0, "typical_p": 1.0, "repetition_penalty": 1.0, "length_penalty": 1.0, "no_repeat_ngram_size": 0, "encoder_no_repeat_ngram_size": 0, "bad_words_ids": null, "num_return_sequences": 1, "chunk_size_feed_forward": 0, "output_scores": false, "return_dict_in_generate": false, "forced_bos_token_id": null, "forced_eos_token_id": null, "remove_invalid_values": false, "exponential_decay_length_penalty": null, "suppress_tokens": null, "begin_suppress_tokens": null, "architectures": ["FalconForCausalLM"], "finetuning_task": null, "id2label": {"0": "LABEL_0", "1": "LABEL_1"}, "label2id": {"LABEL_0": 0, "LABEL_1": 1}, "tokenizer_class": null, "prefix": null, "pad_token_id": null, "sep_token_id": null, "decoder_start_token_id": null, "task_specific_params": null, "problem_type": null, "_name_or_path": "vilsonrodrigues/falcon-7b-instruct-sharded", "transformers_version": "4.32.0.dev0", "apply_residual_connection_post_layernorm": false, "auto_map": {"AutoConfig": "vilsonrodrigues/falcon-7b-instruct-sharded--configuration_falcon.FalconConfig", "AutoModel": "vilsonrodrigues/falcon-7b-instruct-sharded--modeling_falcon.FalconModel", "AutoModelForSequenceClassification": "vilsonrodrigues/falcon-7b-instruct-sharded--modeling_falcon.FalconForSequenceClassification", "AutoModelForTokenClassification": "vilsonrodrigues/falcon-7b-instruct-sharded--modeling_falcon.FalconForTokenClassification", "AutoModelForQuestionAnswering": "vilsonrodrigues/falcon-7b-instruct-sharded--modeling_falcon.FalconForQuestionAnswering", "AutoModelForCausalLM": "vilsonrodrigues/falcon-7b-instruct-sharded--modeling_falcon.FalconForCausalLM"}, "model_type": "falcon", "quantization_config": {"quant_method": "bitsandbytes", "load_in_8bit": false, "load_in_4bit": true, "llm_int8_threshold": 6.0, "llm_int8_skip_modules": null, "llm_int8_enable_fp32_cpu_offload": false, "llm_int8_has_fp16_weight": false, "bnb_4bit_quant_type": "nf4", "bnb_4bit_use_double_quant": true, "bnb_4bit_compute_dtype": "bfloat16"}}
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d1ddbe5e3c709bb7c701dbb814e2e240a442e499eee6e24cf68359f18a58efd0
+ size 4027
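training_args.bin is the pickled `TrainingArguments` object that `transformers.Trainer` writes next to its checkpoints; the 4 KB size is consistent with a small pickled object rather than weights. It can be read back with `torch.load`, with the usual caveat that unpickling executes code and should only be done on trusted files:

```python
# training_args.bin is a pickled transformers.TrainingArguments object.
# torch.load unpickles it -- only do this with files you trust.
# (On torch >= 2.6, pass weights_only=False explicitly.)
import torch

args = torch.load("training_args.bin")
print(args.learning_rate)
print(args.per_device_train_batch_size)
```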