Commit 69ae2b3 by Sethblocks (parent: e1ca0ac)

add better error output for debug

Files changed:
- app.py (+3 -3)
- appgguf.py (+4 -4)
app.py CHANGED

@@ -69,7 +69,7 @@ def genTurn():
     global chat
     try:
         if chara == "a":
-            msg = llm(chat)
+            msg = llm(chat)["choices"][0]["message"]["content"]
             chat.append({"role": "assistant", "content": msg.removesuffix("<|eot_id|>")})
         else:
             #Arteex
@@ -125,8 +125,8 @@ with gr.Blocks() as demo:
             msg = llama.create_chat_completion(reversechat(chat), max_tokens=200)["choices"][0]["message"]["content"]
             chat.append({"role": "user", "content": msg.removesuffix("<|eot_id|>")})
             print(msg)
-    except:
-        print("this chat is over now :(")
+    except Exception as err:
+        print("this chat is over now :( | ", err)
         chara ="a"
         chat = [{"role": "system", "content": prompt},
                 {"role": "user", "content": c2 + ": " + msg1}]
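Judging by the parallel call to llama.create_chat_completion in the same hunk, the bug in the first hunk was that llm(chat) returns an OpenAI-style completion dict rather than a string, so the old code would fail at msg.removesuffix(...). A minimal sketch of the shape being unwrapped (the dict values here are hypothetical, not output from this Space):

# Hypothetical OpenAI-style chat-completion dict, the shape that
# llama-cpp-python's create_chat_completion returns; only the keys
# this app touches are shown.
response = {
    "choices": [
        {
            "index": 0,
            "message": {"role": "assistant", "content": "Hi there!<|eot_id|>"},
            "finish_reason": "stop",
        }
    ]
}

# The fix: unwrap the dict to a string before calling string methods on it,
# then trim the end-of-turn token exactly as the app does (Python 3.9+).
msg = response["choices"][0]["message"]["content"]
print(msg.removesuffix("<|eot_id|>"))  # Hi there!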
appgguf.py CHANGED

@@ -3,7 +3,7 @@ from threading import Thread
 import random
 import llama_cpp
 import os
-import spaces
+#import spaces
 randtxt = ""
 print("downloading!")

@@ -67,7 +67,7 @@ with gr.Blocks() as demo:
         stop=1
     stopbtn.click(None, js="window.location.reload()")

-
+    #@spaces.GPU
    def watch(prompt):
        global chara
        global chat
@@ -93,8 +93,8 @@ with gr.Blocks() as demo:
             msg = llama.create_chat_completion(reversechat(chat), max_tokens=200)["choices"][0]["message"]["content"]
             chat.append({"role": "user", "content": msg.removesuffix("<|eot_id|>")})
             print(msg)
-    except:
-        print("this chat is over now :(")
+    except Exception as err:
+        print("this chat is over now :( | ", err)
         chara ="a"
         chat = [{"role": "system", "content": prompt},
                 {"role": "user", "content": c2 + ": " + msg1}]
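The appgguf.py side also disables the ZeroGPU hooks: the spaces package and its @spaces.GPU decorator are only available on Hugging Face Spaces hardware, so the commit comments them out rather than deleting them. A hedged sketch of an alternative that keeps the decorator working in both environments via a no-op fallback (this is an assumption on my part, not what the commit does):

# Guarded import: use the real ZeroGPU decorator on Spaces,
# fall back to a no-op decorator for local runs.
try:
    import spaces            # only installed on Hugging Face Spaces
    gpu = spaces.GPU         # real ZeroGPU decorator
except ImportError:
    def gpu(fn):             # local fallback: decorator does nothing
        return fn

@gpu
def watch(prompt):
    ...  # placeholder body; the real watch() drives the chat loop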