Yash Sachdeva committed on
Commit · 72231f4
1 Parent(s): 18dd69a
solar
question_paper.py +8 -11
question_paper.py
CHANGED
@@ -12,15 +12,12 @@ TOKENIZER = None
 
 @app.get("/")
 def llama():
-
-    inputs = TOKENIZER(
-
-
-
-
-        eos_token_id=TOKENIZER.eos_token_id,
-    )
-    tresponse = TOKENIZER.decode(outputs[0], skip_special_tokens=True)
+    prompt = [{'role': 'user', 'content': 'List 3 synonyms for the word "tiny"'}]
+    inputs = TOKENIZER.apply_chat_template( prompt, add_generation_prompt=True, return_tensors='pt' )
+
+    tokens = MODEL.generate( inputs.to(MODEL.device), max_new_tokens=1024, temperature=0.3, do_sample=True)
+
+    tresponse = TOKENIZER.decode(tokens[0], skip_special_tokens=False)
     print(tresponse)
 
     return tresponse
@@ -31,6 +28,6 @@ def init_model():
     global TOKENIZER
     if not MODEL:
         print("loading model")
-        TOKENIZER = AutoTokenizer.from_pretrained(
-        MODEL = AutoModelForCausalLM.from_pretrained(
+        TOKENIZER = AutoTokenizer.from_pretrained('stabilityai/stablelm-zephyr-3b')
+        MODEL = AutoModelForCausalLM.from_pretrained('stabilityai/stablelm-zephyr-3b', device_map="auto")
         print("loaded model")