winglian committed on
Commit
739f132
β€’
1 Parent(s): 5dcf14e

open orca preview

Browse files

fix tabs
add start message
fix clear chat
setup queue
setup api vars from env
use .get for env
debug env vars
fix import

Files changed (2) hide show
  1. app.py +107 -0
  2. requirements.txt +2 -0
app.py ADDED
@@ -0,0 +1,107 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import logging
import os
import re
import time

import gradio as gr
import openai
6
+
7
# Point the OpenAI client at the backing inference server.  Using .get() keeps
# startup from raising KeyError when a variable is absent (the client will
# fail later with a clearer auth error instead).
# NOTE(review): removed the `print(os.environ)` debug line -- it dumped every
# environment variable, including OPENAI_API_KEY, into the Space logs, which
# is a secret leak.
openai.api_base = os.environ.get("OPENAI_API_BASE")
openai.api_key = os.environ.get("OPENAI_API_KEY")
10
+
11
def make_prediction(prompt, max_tokens=None, temperature=None, top_p=None, top_k=None, repetition_penalty=None):
    """Stream completion text from the OpenAI-compatible endpoint.

    Yields each text chunk as the server produces it (``stream=True``).
    Sampling parameters are passed straight through; ``None`` leaves the
    server default in effect.
    """
    stream = openai.Completion.create(
        model="Open-Orca/OpenOrcaxOpenChat-Preview2-13B",
        prompt=prompt,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        repetition_penalty=repetition_penalty,
        stream=True,
    )
    for event in stream:
        yield event["choices"][0]["text"]
15
+
16
def delay_typer(words, delay=0.8):
    """Yield *words* one whitespace-delimited token at a time, pausing
    *delay* seconds after each token (typewriter effect).

    Fix: the original called a bare ``sleep`` that was never imported,
    so the first call raised ``NameError``; now uses ``time.sleep``
    (``time`` is imported at the top of the file).
    """
    # Each match keeps the token's surrounding whitespace so the joined
    # output reproduces the input exactly.
    tokens = re.findall(r'\s*\S+\s*', words)
    for token in tokens:
        yield token
        time.sleep(delay)
21
+
22
+
23
def clear_chat(chat_history_state, chat_message):
    """Start a new topic: return an empty history and a blank message box.

    The incoming values are ignored; Gradio replaces the state and textbox
    with whatever is returned here.
    """
    return [], ''
27
+
28
+
29
def user(message, history):
    """Record the user's message as a new, not-yet-answered turn.

    Appends ``[message, ""]`` to the history (the empty string is the
    assistant slot that ``chat`` streams into) and returns an empty string
    to clear the input textbox.
    """
    if not history:
        history = []
    history.append([message, ""])
    return "", history
34
+
35
+
36
def chat(history, system_message, max_tokens, temperature, top_p, top_k, repetition_penalty):
    """Build the prompt from the chat history, stream the model's reply into
    the last turn, and yield partial updates for Gradio to render.

    Yields ``(chatbot_history, state_history, "")`` on every streamed token.
    """
    history = history or []

    messages = system_message.strip() + "\n" + \
        "\n".join(["\n".join(["User: "+item[0]+"<|end_of_turn|>", "Assistant: "+item[1]+"<|end_of_turn|>"])
                for item in history])
    # Drop only the final `<|end_of_turn|>` marker so the model continues the
    # last (empty) assistant turn.
    # Fix: the original used messages.rstrip("<|end_of_turn|>"), which strips
    # trailing characters from that *set* (e.g. '<', 'e', 'n', 'd', ...) and
    # could eat real text at the end of the prompt.
    suffix = "<|end_of_turn|>"
    if messages.endswith(suffix):
        messages = messages[:-len(suffix)]
    # remove last space from assistant, some models output a ZWSP if you leave a space
    messages = messages.rstrip()

    prediction = make_prediction(
        messages,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        repetition_penalty=repetition_penalty,
    )
    for tokens in prediction:
        # Re-split each streamed chunk so the UI updates word by word.
        for piece in re.findall(r'\s*\S+\s*', tokens):
            history[-1][1] += piece
            # stream the partial response to both the chatbot and the state
            yield history, history, ""
62
+
63
+
64
+ start_message = ""
65
+
66
+ with gr.Blocks() as demo:
67
+ with gr.Row():
68
+ with gr.Column():
69
+ gr.Markdown(f"""
70
+ ### Brought to you by OpenChat x OpenOrca
71
+ """)
72
+ with gr.Tab("Chatbot"):
73
+ gr.Markdown("# πŸ‹ OpenChat x OpenOrca-Preview2 GGML Playground Space! πŸ‹")
74
+ chatbot = gr.Chatbot()
75
+ with gr.Row():
76
+ message = gr.Textbox(
77
+ label="What do you want to chat about?",
78
+ placeholder="Ask me anything.",
79
+ lines=3,
80
+ )
81
+ with gr.Row():
82
+ submit = gr.Button(value="Send message", variant="secondary").style(full_width=True)
83
+ clear = gr.Button(value="New topic", variant="secondary").style(full_width=False)
84
+ stop = gr.Button(value="Stop", variant="secondary").style(full_width=False)
85
+ with gr.Row():
86
+ with gr.Column():
87
+ max_tokens = gr.Slider(20, 1000, label="Max Tokens", step=20, value=300)
88
+ temperature = gr.Slider(0.2, 2.0, label="Temperature", step=0.1, value=0.8)
89
+ top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.95)
90
+ top_k = gr.Slider(0, 100, label="Top K", step=1, value=40)
91
+ repetition_penalty = gr.Slider(0.0, 2.0, label="Repetition Penalty", step=0.1, value=1.1)
92
+
93
+ system_msg = gr.Textbox(
94
+ start_message, label="System Message", interactive=True, visible=True, placeholder="system prompt, useful for RP", lines=5)
95
+
96
+ chat_history_state = gr.State()
97
+ clear.click(clear_chat, inputs=[chat_history_state, message], outputs=[chat_history_state, message], queue=False)
98
+ clear.click(lambda: None, None, chatbot, queue=False)
99
+
100
+ submit_click_event = submit.click(
101
+ fn=user, inputs=[message, chat_history_state], outputs=[message, chat_history_state], queue=True
102
+ ).then(
103
+ fn=chat, inputs=[chat_history_state, system_msg, max_tokens, temperature, top_p, top_k, repetition_penalty], outputs=[chatbot, chat_history_state, message], queue=True
104
+ )
105
+ stop.click(fn=None, inputs=None, outputs=None, cancels=[submit_click_event], queue=False)
106
+
107
+ demo.queue(max_size=48, concurrency_count=16).launch(debug=True, server_name="0.0.0.0", server_port=7860)
requirements.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ requests
2
+ openai