zhangjf commited on
Commit
c380d04
1 Parent(s): 1111493

Update the to_md function to display code blocks correctly

Browse files
Files changed (1) hide show
  1. app.py +128 -1
app.py CHANGED
@@ -1,3 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  def to_md(content):
2
  is_inside_code_block = False
3
  output_spans = []
@@ -35,4 +101,65 @@ def to_md(content):
35
  output_spans.append("\n")
36
  else:
37
  output_spans.append(content[i])
38
- return "".join(output_spans)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import openai
2
+ import tiktoken
3
+
4
+ import datetime
5
+ import json
6
+ import os
7
+
8
+ openai.api_key = os.getenv('API_KEY')
9
+ openai.request_times = 0
10
+
11
def ask(question, history, behavior):
    """Send one user question (plus prior turns) to the OpenAI chat API.

    Args:
        question: the new user message.
        history: flat list of alternating user/assistant strings.
        behavior: list of system-prompt strings prepended to the conversation.

    Returns:
        The history list extended with the question and the model's reply
        (or a canned error message if the request failed).
    """
    openai.request_times += 1
    print(f"request times {openai.request_times}: {datetime.datetime.now()}: {question}")
    try:
        # System prompts first, then the alternating user/assistant turns.
        system_msgs = [{"role": "system", "content": text} for text in behavior]
        chat_msgs = [
            {"role": "user" if idx % 2 == 0 else "assistant", "content": text}
            for idx, text in enumerate(history + [question])
        ]
        completion = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=forget_long_term(system_msgs + chat_msgs),
        )
        response = completion["choices"][0]["message"]["content"]
        # The model often prefixes its reply with blank lines; trim them.
        while response.startswith("\n"):
            response = response[1:]
    except Exception as e:
        # Best-effort fallback: surface a retry hint instead of crashing the UI.
        print(e)
        response = 'Timeout! Please wait a few minutes and retry'
    return history + [question, response]
34
+
35
def num_tokens_from_messages(messages, model="gpt-3.5-turbo"):
    """Returns the number of tokens used by a list of messages."""
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        # Unknown model name: fall back to the cl100k_base encoding.
        encoding = tiktoken.get_encoding("cl100k_base")
    if model != "gpt-3.5-turbo":  # note: future models may deviate from this
        raise NotImplementedError(f"""num_tokens_from_messages() is not presently implemented for model {model}.
See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens.""")
    total = 0
    for message in messages:
        # Every message follows <im_start>{role/name}\n{content}<im_end>\n
        total += 4
        for key, value in message.items():
            total += len(encoding.encode(value))
            if key == "name":
                # If there's a name, the role is omitted; role is always
                # required and always 1 token, so compensate by one.
                total -= 1
    # Every reply is primed with <im_start>assistant.
    return total + 2
54
+
55
def forget_long_term(messages, max_num_tokens=4000):
    """Drop the oldest messages until the conversation fits the token budget.

    A leading system prompt is preserved when possible: if the first message
    is a system message whose content is not itself oversized, the message
    right after it (the oldest conversational turn) is dropped instead.

    Args:
        messages: list of {"role": ..., "content": ...} dicts, oldest first.
        max_num_tokens: token budget measured by num_tokens_from_messages().

    Returns:
        A (possibly shorter) list of messages within the budget.
    """
    while num_tokens_from_messages(messages) > max_num_tokens:
        # BUG FIX: the original wrote `not len(messages[0]["content"]>=max_num_tokens)`,
        # which compares a str to an int inside len() and raises TypeError
        # whenever a system message is first. The intended check is on the
        # content's length.
        if messages[0]["role"] == "system" and len(messages[0]["content"]) < max_num_tokens:
            # Keep the reasonably sized system prompt; drop the turn after it.
            messages = messages[:1] + messages[2:]
        else:
            messages = messages[1:]
    return messages
62
+
63
+
64
+ import gradio as gr
65
+
66
+
67
  def to_md(content):
68
  is_inside_code_block = False
69
  output_spans = []
 
101
  output_spans.append("\n")
102
  else:
103
  output_spans.append(content[i])
104
+ return "".join(output_spans)
105
+
106
+
107
def predict(question, history=None, behavior=None):
    """Gradio callback: run one chat turn and rebuild the chatbot pairs.

    Args:
        question: the user's new message from the textbox.
        history: flat list of alternating user/assistant strings (gr.State).
        behavior: list of system-prompt strings (gr.State).

    Returns:
        ("", updated history, list of (user_md, assistant_md) pairs) — the
        empty string clears the input textbox.
    """
    # Avoid mutable default arguments: a shared `[]` default would be reused
    # across calls. None-sentinels keep the signature backward-compatible.
    history = [] if history is None else history
    behavior = [] if behavior is None else behavior
    history = ask(question, history, behavior)
    # Pair up consecutive (user, assistant) turns, rendered as markdown.
    response = [(to_md(history[i]), to_md(history[i+1])) for i in range(0, len(history)-1, 2)]
    return "", history, response
111
+
112
+
113
+ with gr.Blocks() as demo:
114
+
115
+ examples_txt = [
116
+ ['帮我写一个python脚本实现快排'],
117
+ ['如何用numpy提取数组的分位数?'],
118
+ ['how to match the code block in markdown such like ```def foo():\n pass``` through regex in python?'],
119
+ ['how to load a pre-trained language model and generate sentences?'],
120
+ ]
121
+
122
+ examples_bhv = [
123
+ f"You are a helpful assistant. You will answer all the questions step-by-step.",
124
+ f"You are a helpful assistant. Today is {datetime.date.today()}.",
125
+ ]
126
+
127
+ gr.Markdown(
128
+ """
129
+ 朋友你好,
130
+
131
+ 这是我利用[gradio](https://gradio.app/creating-a-chatbot/)编写的一个小网页,用于以网页的形式给大家分享ChatGPT请求服务,希望你玩的开心。关于使用技巧或学术研讨,欢迎在[Community](https://huggingface.co/spaces/zhangjf/chatbot/discussions)中和我交流。
132
+
133
+ 这一版相比于原版的[chatbot](https://huggingface.co/spaces/zhangjf/chatbot),用了较低版本的gradio==3.16.2,因而能更好地展示markdown中的源代码
134
+
135
+ p.s. 响应时间和聊天内容长度正相关,一般能在5秒~30秒内响应。
136
+ """)
137
+
138
+ behavior = gr.State([])
139
+
140
+ with gr.Column(variant="panel"):
141
+ with gr.Row().style(equal_height=True):
142
+ with gr.Column(scale=0.85):
143
+ bhv = gr.Textbox(show_label=False, placeholder="输入你想让ChatGPT扮演的人设").style(container=False)
144
+ with gr.Column(scale=0.15, min_width=0):
145
+ button_set = gr.Button("Set")
146
+ bhv.submit(fn=lambda x:(x,[x]), inputs=[bhv], outputs=[bhv, behavior])
147
+ button_set.click(fn=lambda x:(x,[x]), inputs=[bhv], outputs=[bhv, behavior])
148
+
149
+
150
+ state = gr.State([])
151
+
152
+ with gr.Column(variant="panel"):
153
+ chatbot = gr.Chatbot()
154
+ txt = gr.Textbox(show_label=False, placeholder="输入你想让ChatGPT回答的问题").style(container=False)
155
+ with gr.Row():
156
+ button_gen = gr.Button("Submit")
157
+ button_clr = gr.Button("Clear")
158
+
159
+ gr.Examples(examples=examples_bhv, inputs=bhv, label="Examples for setting behavior")
160
+ gr.Examples(examples=examples_txt, inputs=txt, label="Examples for asking question")
161
+ txt.submit(predict, [txt, state, behavior], [txt, state, chatbot])
162
+ button_gen.click(fn=predict, inputs=[txt, state, behavior], outputs=[txt, state, chatbot])
163
+ button_clr.click(fn=lambda :([],[]), inputs=None, outputs=[chatbot, state])
164
+
165
+ demo.launch()