mayank-mishra committed on
Commit
91739d1
1 Parent(s): b3cb77e

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +1 -1
README.md CHANGED
@@ -226,7 +226,7 @@ This is a simple example of how to use **Granite-20B-Code-Instruct** model.
226
  import torch
227
  from transformers import AutoModelForCausalLM, AutoTokenizer
228
  device = "cuda" # or "cpu"
229
- model_path = "granite-8b-code-instruct"
230
  tokenizer = AutoTokenizer.from_pretrained(model_path)
231
  # drop device_map if running on CPU
232
  model = AutoModelForCausalLM.from_pretrained(model_path, device_map=device)
 
226
  import torch
227
  from transformers import AutoModelForCausalLM, AutoTokenizer
228
  device = "cuda" # or "cpu"
229
+ model_path = "ibm-granite/granite-20b-code-instruct"
230
  tokenizer = AutoTokenizer.from_pretrained(model_path)
231
  # drop device_map if running on CPU
232
  model = AutoModelForCausalLM.from_pretrained(model_path, device_map=device)