wangzhang committed on
Commit c8221b1
1 Parent(s): 29ee671

Update README.md

Files changed (1)
  1. README.md +26 -1
README.md CHANGED
@@ -9,4 +9,29 @@ datasets:
  library_name: adapter-transformers
  ---
 
- # This is a private NLP model trained with data from SequoiaDB
+ # This is a private NLP model trained with data from SequoiaDB
+
+ ```python
+ import torch
+ from peft import PeftModel
+ from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
+
+ # Base Llama-2 checkpoint and the SequoiaDB adapter trained on top of it
+ model_name = "TinyPixel/Llama-2-7B-bf16-sharded"
+ adapters_name = "wangzhang/Llama2-sequoiaDB"
+
+ # Load the base model with 4-bit NF4 quantization, sharded across the available GPUs
+ model = AutoModelForCausalLM.from_pretrained(
+     model_name,
+     torch_dtype=torch.bfloat16,
+     device_map="auto",
+     max_memory={i: "24000MB" for i in range(torch.cuda.device_count())},
+     quantization_config=BitsAndBytesConfig(
+         load_in_4bit=True,
+         bnb_4bit_compute_dtype=torch.bfloat16,
+         bnb_4bit_use_double_quant=True,
+         bnb_4bit_quant_type="nf4",
+     ),
+ )
+
+ # Attach the LoRA adapter and load the matching tokenizer
+ model = PeftModel.from_pretrained(model, adapters_name)
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+ ```
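
With the adapter attached, the model can be used like any other `transformers` causal LM. Below is a minimal generation sketch; the prompt text and sampling settings are illustrative assumptions, not taken from the README.

```python
# Minimal inference sketch: the prompt and generation settings are
# illustrative assumptions, not part of the original model card.
prompt = "How do I create a collection in SequoiaDB?"

# The model was loaded with device_map="auto", so move inputs to its device
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

with torch.no_grad():
    output_ids = model.generate(
        **inputs,
        max_new_tokens=256,
        do_sample=True,
        temperature=0.7,
        top_p=0.9,
    )

print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```

Because the base model is quantized to 4-bit and sharded with `device_map="auto"`, generation runs on whichever GPUs the weights were placed on; only the inputs need to be moved explicitly.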