Spaces: Sethblocks (Sleeping)

Sethblocks committed
Commit • eea8986 • 1 Parent(s): c57cfba

remove unused code
app.py CHANGED
@@ -6,7 +6,7 @@ import os
 import spaces
 randtxt = ""
 print("downloading!")
-
+
 llama = llama_cpp.Llama("Meta-Llama-3-8B-Instruct.Q4_0.gguf", chat_format="llama-3")
 
 def randomize():
@@ -16,11 +16,6 @@ def randomize():
     genTurn()
 
 
-#chat = [{"role": "system", "content": "The following is a never-ending chat between Berry and Llama. Berry is the personification of a raspberry. Llama is Berry's best friend. They already know each other well. The chat will not end but may cut to a later date after a chat. They try to use relatively short responses no longer than 5 sentences. "},
-#        {"role": "user", "content": "berry: Good morning"}] # POV: llama is "assistant"
-#print(chat[len(chat)-1]["content"])
-
-
 #llama
 
 def reversechat(chat):
@@ -58,49 +53,6 @@ def genTurn():
 
 
 
-def watch(prompt):
-    global chara
-    global chat
-    prompt.append(("hi", "yo"))
-    return prompt
-
-
-
-    chat[0] = {"role": "system", "content": prompt}
-    chat[1] = {"role": "user", "content": c2 + ": " + msg1}
-
-    #Generate message
-    try:
-        if chara == "a":
-            msg = llama.create_chat_completion(chat, max_tokens=200)["choices"][0]["message"]["content"]
-            chat.append({"role": "assistant", "content": msg.removesuffix("<|eot_id|>")})
-        else:
-            #Arteex
-            msg = llama.create_chat_completion(reversechat(chat), max_tokens=200)["choices"][0]["message"]["content"]
-            chat.append({"role": "user", "content": msg.removesuffix("<|eot_id|>")})
-        print(msg)
-    except:
-        print("this chat is over now :(")
-        chara ="a"
-        chat = [{"role": "system", "content": prompt},
-                {"role": "user", "content": c2 + ": " + msg1}]
-
-
-
-    msgsview = []
-    for msg in chat:
-        if msg["role"] == "system":
-            pass
-        else:
-            if not msg["content"].lower().startswith("llama:"):
-                msgsview.append((msg["content"], None))
-            else:
-                msgsview.append((None, msg["content"]))
-    yield msgsview
-
-#demo = gr.Interface(watch,inputs=None, outputs=gr.Chatbot(), live=True, description="click generate to show latest chat!", title="LlamaLive, watch an llm conversation!")
-
-#randomize()
 
 import time
 with gr.Blocks() as demo:
@@ -164,24 +116,3 @@ with gr.Blocks() as demo:
 
 if __name__ == "__main__":
     demo.launch()
-
-exit()
-
-
-
-
-
-
-
-print(chat)
-
-if __name__ == "__main__":
-
-    #Thread(target=randomize).start() bad idea running llm 24/7 for no reason
-    with gr.Blocks() as demo:
-        gr.Markdown("# LlamaLive\nwatch a live interaction between 2 chatbots!")
-        cb = gr.Chatbot()
-        cb.value=([(None, "testing")])
-        btn = gr.Button()
-        btn.click(watch, inputs=[cb], outputs=[cb])
-        demo.launch()
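Note: the deleted watch block was already dead code, but it shows the two patterns the rest of app.py relies on: generating each turn with llama.create_chat_completion and passing the history through reversechat so one model can reply as either character. The sketch below is an illustration only; reversechat's real body and genTurn are not part of this diff, so the role-flip and the gen_turn helper shown here are assumptions.

# Illustrative sketch only (not the Space's actual code). The model path and
# chat_format are taken from the diff; reversechat is assumed to flip roles.
import llama_cpp

llama = llama_cpp.Llama("Meta-Llama-3-8B-Instruct.Q4_0.gguf", chat_format="llama-3")

chat = [
    {"role": "system", "content": "A chat between Berry and Llama."},
    {"role": "user", "content": "berry: Good morning"},
]

def reversechat(chat):
    # Assumption: swap user/assistant so the model can answer as the other bot.
    flip = {"user": "assistant", "assistant": "user"}
    return [{**m, "role": flip.get(m["role"], m["role"])} for m in chat]

def gen_turn(chara):
    # One turn: "a" replies as the assistant, anything else as the flipped user.
    msgs = chat if chara == "a" else reversechat(chat)
    out = llama.create_chat_completion(msgs, max_tokens=200)
    msg = out["choices"][0]["message"]["content"].removesuffix("<|eot_id|>")
    chat.append({"role": "assistant" if chara == "a" else "user", "content": msg})
    return msg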