suarkadipa committed
Commit 011bc66
1 Parent(s): e16f336

Update README.md

Files changed (1)
  1. README.md +42 -1
README.md CHANGED
@@ -1,4 +1,45 @@
  ---
  datasets:
  - Abirate/english_quotes
- ---
+ pipeline_tag: text-generation
+ ---
+
+ # How to run in Google Colab
+
+ Note: this must be run on a GPU runtime.
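+
+ A quick sanity check (a minimal sketch, not part of the original instructions): confirm the Colab runtime actually has a GPU before downloading the 20B model.
+ ```python
+ import torch
+
+ # fails early if the runtime is still on CPU
+ assert torch.cuda.is_available(), "Switch to a GPU runtime via Runtime > Change runtime type"
+ print(torch.cuda.get_device_name(0))
+ ```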
+ ```python
+ !pip install -q -U bitsandbytes
+ !pip install -q -U git+https://github.com/huggingface/transformers.git
+ !pip install -q -U git+https://github.com/huggingface/peft.git
+ # accelerate is needed for device_map placement of the quantized model
+ !pip install -q -U accelerate
+ ```
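+
+ Optionally (a small sketch, assuming the installs above succeeded), print the installed versions so runs are easier to reproduce:
+ ```python
+ import bitsandbytes, peft, transformers
+
+ print("transformers", transformers.__version__)
+ print("peft", peft.__version__)
+ print("bitsandbytes", bitsandbytes.__version__)
+ ```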
+ ```python
+ import torch
+ from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
+
+ model_id = "EleutherAI/gpt-neox-20b"
+
+ # load the base model in 4-bit NF4 with double quantization, computing in bfloat16
+ bnb_config = BitsAndBytesConfig(
+     load_in_4bit=True,
+     bnb_4bit_use_double_quant=True,
+     bnb_4bit_quant_type="nf4",
+     bnb_4bit_compute_dtype=torch.bfloat16
+ )
+
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
+ model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=bnb_config, device_map={"": 0})
+ ```
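+
+ As a rough check (a sketch, not from the original card), `get_memory_footprint()` reports how much memory the 4-bit weights occupy, which is why the 20B model fits on a single Colab GPU:
+ ```python
+ # approximate size of the quantized weights, in GB
+ print(round(model.get_memory_footprint() / 1e9, 2), "GB")
+ ```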
+ ```python
+ from peft import PeftModel
+
+ # attach the fine-tuned LoRA adapter weights to the quantized base model
+ model = PeftModel.from_pretrained(model, "suarkadipa/gpt-neox-20b-english-quotes")
+ ```
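+
+ Before generating, it can help (a minimal sketch, assuming the adapter loaded above) to switch to eval mode and confirm which adapter is active:
+ ```python
+ model.eval()                 # disable dropout in the LoRA layers for inference
+ print(model.active_adapter)  # adapter name, "default" unless it was renamed
+ ```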
+ ```python
+ text = "Yaya Toure "
+ device = "cuda:0"
+
+ inputs = tokenizer(text, return_tensors="pt").to(device)
+ # greedy decoding of 20 new tokens following the prompt
+ outputs = model.generate(**inputs, max_new_tokens=20)
+ print(tokenizer.decode(outputs[0], skip_special_tokens=True))
+
+ # example output: Yaya Touré was born in the Ivory Coast, but moved to France at the age
+ ```
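+
+ For more varied quote-style completions, `generate` also accepts sampling parameters; the values below are illustrative, not tuned settings from the original card:
+ ```python
+ outputs = model.generate(
+     **inputs,
+     max_new_tokens=40,
+     do_sample=True,   # sample instead of greedy decoding
+     temperature=0.7,
+     top_p=0.9,
+ )
+ print(tokenizer.decode(outputs[0], skip_special_tokens=True))
+ ```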