reach-vb HF staff committed on
Commit
29139b7
1 Parent(s): 977ab33

Update README.md (#5)

Browse files

- Update README.md (4409dd776fb4484cb7b8c76768ad8d4d035a51a8)

Files changed (1) hide show
  1. README.md +12 -7
README.md CHANGED
@@ -111,7 +111,10 @@ print(tokenizer.decode(outputs[0]))
111
  from transformers import AutoTokenizer, AutoModelForCausalLM
112
 
113
  tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-9b-it")
114
- model = AutoModelForCausalLM.from_pretrained("google/gemma-2-9b-it", device_map="auto", torch_dtype=torch.bfloat16)
 
 
 
115
 
116
  input_text = "Write me a poem about Machine Learning."
117
  input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
@@ -129,8 +132,7 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
129
  tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-9b-it")
130
  model = AutoModelForCausalLM.from_pretrained(
131
  "google/gemma-2-9b-it",
132
- device_map="auto"
133
- )
134
 
135
  input_text = "Write me a poem about Machine Learning."
136
  input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
@@ -150,7 +152,9 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
150
  quantization_config = BitsAndBytesConfig(load_in_8bit=True)
151
 
152
  tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-9b-it")
153
- model = AutoModelForCausalLM.from_pretrained("google/gemma-2-9b-it", quantization_config=quantization_config)
 
 
154
 
155
  input_text = "Write me a poem about Machine Learning."
156
  input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
@@ -168,7 +172,9 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
168
  quantization_config = BitsAndBytesConfig(load_in_4bit=True)
169
 
170
  tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-9b-it")
171
- model = AutoModelForCausalLM.from_pretrained("google/gemma-2-9b-it", quantization_config=quantization_config)
 
 
172
 
173
  input_text = "Write me a poem about Machine Learning."
174
  input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
@@ -211,8 +217,7 @@ tokenizer = AutoTokenizer.from_pretrained(model_id)
211
  model = AutoModelForCausalLM.from_pretrained(
212
  model_id,
213
  device_map="cuda",
214
- torch_dtype=dtype,
215
- )
216
 
217
  chat = [
218
  { "role": "user", "content": "Write a hello world program" },
 
111
  from transformers import AutoTokenizer, AutoModelForCausalLM
112
 
113
  tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-9b-it")
114
+ model = AutoModelForCausalLM.from_pretrained(
115
+ "google/gemma-2-9b-it",
116
+ device_map="auto",
117
+ torch_dtype=torch.bfloat16)
118
 
119
  input_text = "Write me a poem about Machine Learning."
120
  input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
 
132
  tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-9b-it")
133
  model = AutoModelForCausalLM.from_pretrained(
134
  "google/gemma-2-9b-it",
135
+ device_map="auto")
 
136
 
137
  input_text = "Write me a poem about Machine Learning."
138
  input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
 
152
  quantization_config = BitsAndBytesConfig(load_in_8bit=True)
153
 
154
  tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-9b-it")
155
+ model = AutoModelForCausalLM.from_pretrained(
156
+ "google/gemma-2-9b-it",
157
+ quantization_config=quantization_config)
158
 
159
  input_text = "Write me a poem about Machine Learning."
160
  input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
 
172
  quantization_config = BitsAndBytesConfig(load_in_4bit=True)
173
 
174
  tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-9b-it")
175
+ model = AutoModelForCausalLM.from_pretrained(
176
+ "google/gemma-2-9b-it",
177
+ quantization_config=quantization_config)
178
 
179
  input_text = "Write me a poem about Machine Learning."
180
  input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
 
217
  model = AutoModelForCausalLM.from_pretrained(
218
  model_id,
219
  device_map="cuda",
220
+ torch_dtype=dtype,)
 
221
 
222
  chat = [
223
  { "role": "user", "content": "Write a hello world program" },