MuskanZehra5 committed on
Commit
f7d4e42
·
1 Parent(s): d58c3be

Upload skin analysis model

Browse files
adapter_config.json ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "alpha_pattern": {},
3
+ "auto_mapping": null,
4
+ "base_model_name_or_path": "D:/Zanny/Mistral/mistral-7b",
5
+ "bias": "none",
6
+ "corda_config": null,
7
+ "eva_config": null,
8
+ "exclude_modules": null,
9
+ "fan_in_fan_out": false,
10
+ "inference_mode": true,
11
+ "init_lora_weights": true,
12
+ "layer_replication": null,
13
+ "layers_pattern": null,
14
+ "layers_to_transform": null,
15
+ "loftq_config": {},
16
+ "lora_alpha": 16,
17
+ "lora_bias": false,
18
+ "lora_dropout": 0.1,
19
+ "megatron_config": null,
20
+ "megatron_core": "megatron.core",
21
+ "modules_to_save": null,
22
+ "peft_type": "LORA",
23
+ "r": 64,
24
+ "rank_pattern": {},
25
+ "revision": null,
26
+ "target_modules": [
27
+ "v_proj",
28
+ "o_proj",
29
+ "k_proj",
30
+ "q_proj"
31
+ ],
32
+ "task_type": "CAUSAL_LM",
33
+ "trainable_token_indices": null,
34
+ "use_dora": false,
35
+ "use_rslora": false
36
+ }
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:964690eb3c6f84df534956ed64509b3c28e2df09d9c2f5ef4549681962929b5e
3
+ size 218138576
mistral_download_once.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
2
+ import torch
3
+
4
+ model_id = "mistralai/Mistral-7B-Instruct-v0.1"
5
+ save_path = "D:/Zanny/Mistral/mistral-7b"
6
+
7
+ bnb_config = BitsAndBytesConfig(
8
+ load_in_4bit=True,
9
+ bnb_4bit_use_double_quant=True,
10
+ bnb_4bit_quant_type="nf4",
11
+ bnb_4bit_compute_dtype=torch.float16,
12
+ )
13
+
14
+ print("🔽 Downloading model shards with 4-bit quantization...")
15
+
16
+ model = AutoModelForCausalLM.from_pretrained(
17
+ model_id,
18
+ device_map="auto",
19
+ quantization_config=bnb_config,
20
+ trust_remote_code=True
21
+ )
22
+ model.save_pretrained(save_path)
23
+
24
+ print("🔽 Downloading tokenizer...")
25
+ tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
26
+ tokenizer.save_pretrained(save_path)
27
+
28
+ print(f"✅ Model + tokenizer saved to: {save_path}")
29
+
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5de3f60f520e337fbd99b5de1ed4ff34662fdbb69cd269b240836adeeb5ce21b
3
+ size 5624