Ubuntu committed on
Commit 8d4df10
1 parent: 4658d25

Use hub model for inference

Files changed (1)
  1. llama2-inference.py +1 -1
llama2-inference.py CHANGED
@@ -2,7 +2,7 @@ from transformers import AutoTokenizer
 import transformers
 import torch
 
-model = "./output"
+model = "juliensimon/llama2-7b-qlora-openassistant-guanaco"
 
 tokenizer = AutoTokenizer.from_pretrained(model)
 pipeline = transformers.pipeline(
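
For context, after this change the script loads the fine-tuned model directly from the Hugging Face Hub instead of a local ./output directory. The diff is truncated at the pipeline( call, so the sketch below of the full inference flow is an assumption: the text-generation task, dtype, device placement, prompt, and sampling parameters are illustrative, not taken from the commit.

# Minimal inference sketch, assuming a standard text-generation pipeline.
from transformers import AutoTokenizer
import transformers
import torch

model = "juliensimon/llama2-7b-qlora-openassistant-guanaco"

tokenizer = AutoTokenizer.from_pretrained(model)
pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    torch_dtype=torch.float16,  # half precision to fit the 7B model on a single GPU
    device_map="auto",          # let accelerate place layers on available devices
)

# Illustrative prompt and sampling settings (not part of the commit).
output = pipeline(
    "What is a large language model?",
    do_sample=True,
    top_k=10,
    num_return_sequences=1,
    eos_token_id=tokenizer.eos_token_id,
    max_length=200,
)
print(output[0]["generated_text"])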