Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -42,16 +42,20 @@ newmodel = PeftModel.from_pretrained(newmodel, peft_model_id,
|
|
42 |
use_auth_token="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", load_in_8bit=True, device_map='cpu')
|
43 |
|
44 |
def givetext(input_text,lmodel,ltokenizer):
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
-
|
49 |
-
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
|
|
|
|
|
|
|
|
|
55 |
|
56 |
def mental_chat(message, history):
|
57 |
return givetext(message,newmodel,newtokenizer)
|
|
|
42 |
use_auth_token="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", load_in_8bit=True, device_map='cpu')
|
43 |
|
44 |
def givetext(input_text, lmodel, ltokenizer):
    """Wrap *input_text* in a therapist-instruction prompt and generate a reply.

    Parameters:
        input_text: the user's message, spliced into the Alpaca-style prompt.
        lmodel: a causal language model exposing ``.eval()`` and ``.generate()``.
        ltokenizer: a tokenizer exposing ``__call__`` and ``.decode()``.

    Returns:
        The decoded generation as a string, or ``None`` if anything raises
        (the error is printed; best-effort behavior preserved from the original).
    """
    try:
        # Alpaca-style instruction prompt wrapped around the user input.
        eval_prompt_pt1 = "\nBelow is an instruction that describes a task. Write a response that appropriately completes the request.\n### Instruction: Act like a therapist and respond\n\n### Input: "
        eval_prompt_pt2 = "\n\n\n### Response:\n"
        eval_prompt = eval_prompt_pt1 + input_text + eval_prompt_pt2
        print(eval_prompt, "\n\n")

        # Tokenize and keep tensors on CPU (model was loaded with device_map='cpu').
        model_input = ltokenizer(eval_prompt, return_tensors="pt").to("cpu")

        lmodel.eval()
        # Inference only — no gradients needed.
        with torch.no_grad():
            return ltokenizer.decode(
                lmodel.generate(**model_input, max_new_tokens=1000)[0],
                skip_special_tokens=True,
            )
    except Exception as error:
        # BUG FIX: original was "Exception error}".format(error=error), which has
        # no "{error}" placeholder — it always printed the literal text
        # "Exception error}" and silently discarded the actual exception.
        print("Exception: {error}".format(error=error))
|
59 |
|
60 |
def mental_chat(message, history):
    """Chat-interface handler: answer *message* with the fine-tuned model.

    *history* is accepted for the chat-callback signature but not used.
    """
    reply = givetext(message, newmodel, newtokenizer)
    return reply
|