JohnSmith9982 committed
Commit 2c3bb3b
Parent: d340a9d

Upload 4 files

Files changed (4)
  1. app.py +6 -6
  2. presets.py +3 -2
  3. requirements.txt +4 -0
  4. utils.py +64 -27
app.py CHANGED
@@ -1,5 +1,5 @@
+# -*- coding:utf-8 -*-
 import gradio as gr
-# import openai
 import os
 import sys
 import argparse
@@ -43,11 +43,11 @@ gr.Chatbot.postprocess = postprocess
 
 with gr.Blocks(css=customCSS) as demo:
     gr.HTML(title)
-    gr.HTML('''<center><a href="https://huggingface.co/spaces/JohnSmith9982/ChuanhuChatGPT?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="复制 Space"></a>强烈建议点击上面的按钮复制一份这个Space,在你自己的Space里运行,响应更迅速、也更安全👆</center>''')
     with gr.Row():
-        keyTxt = gr.Textbox(show_label=False, placeholder=f"在这里输入你的OpenAI API-key...",
-                            value=my_api_key, type="password", visible=not HIDE_MY_KEY).style(container=True)
-        use_streaming_checkbox = gr.Checkbox(label="实时传输回答", value=True, visible=enable_streaming_option)
+        with gr.Column(scale=4):
+            keyTxt = gr.Textbox(show_label=False, placeholder=f"在这里输入你的OpenAI API-key...", value=my_api_key, type="password", visible=not HIDE_MY_KEY).style(container=True)
+        with gr.Column(scale=1):
+            use_streaming_checkbox = gr.Checkbox(label="实时传输回答", value=True, visible=enable_streaming_option)
     chatbot = gr.Chatbot()  # .style(color_map=("#1D51EE", "#585A5B"))
     history = gr.State([])
     token_count = gr.State([])
@@ -138,7 +138,7 @@ with gr.Blocks(css=customCSS) as demo:
 
     templateApplyBtn.click(get_template_content, [promptTemplates, templateSelectDropdown, systemPromptTxt], [systemPromptTxt], show_progress=True)
 
-print("川虎的温馨提示:访问 http://localhost:7860 查看界面")
+print(colorama.Back.GREEN + "\n川虎的温馨提示:访问 http://localhost:7860 查看界面" + colorama.Style.RESET_ALL)
 # Local server enabled by default; directly reachable via IP by default; no public share link created by default
 demo.title = "川虎ChatGPT 🚀"
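A note on the layout change above: nesting gr.Column(scale=...) blocks inside a gr.Row splits the row into proportionally sized columns, here roughly 4:1 between the API-key box and the streaming checkbox. A minimal self-contained sketch of the same pattern (labels and variable names are illustrative, not from this repo):

    import gradio as gr

    with gr.Blocks() as demo:
        with gr.Row():
            with gr.Column(scale=4):  # takes ~4/5 of the row width
                key_box = gr.Textbox(show_label=False, placeholder="OpenAI API key...", type="password")
            with gr.Column(scale=1):  # takes ~1/5 of the row width
                stream_toggle = gr.Checkbox(label="Stream responses", value=True)

    demo.launch()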
 
presets.py CHANGED
@@ -1,3 +1,4 @@
+# -*- coding:utf-8 -*-
 title = """<h1 align="center">川虎ChatGPT 🚀</h1>"""
 description = """<div align=center>
 
@@ -33,9 +34,9 @@ pre code {
 standard_error_msg = "☹️发生了错误:"  # standard prefix for error messages
 error_retrieve_prompt = "连接超时,无法获取对话。请检查网络连接,或者API-Key是否有效。"  # shown when fetching the conversation fails
 summarize_prompt = "请总结以上对话,不超过100字。"  # prompt used when summarizing the conversation
-max_token_streaming = 3000  # maximum token count for streaming conversations
+max_token_streaming = 3500  # maximum token count for streaming conversations
 timeout_streaming = 5  # timeout for streaming conversations
 max_token_all = 3500  # maximum token count for non-streaming conversations
 timeout_all = 200  # timeout for non-streaming conversations
-enable_streaming_option = False  # whether to show the checkbox that toggles real-time responses
+enable_streaming_option = True  # whether to show the checkbox that toggles real-time responses
 HIDE_MY_KEY = False  # set this to True if you want to hide your API key in the UI
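For context, max_token_streaming and max_token_all gate the automatic summarization in utils.predict: once the running total of per-exchange token counts exceeds the ceiling for the current mode, reduce_token_size is triggered. A condensed sketch of that check (constant values mirror this commit; the function name is illustrative):

    MAX_TOKEN_STREAMING = 3500  # mirrors presets.max_token_streaming
    MAX_TOKEN_ALL = 3500        # mirrors presets.max_token_all

    def over_budget(token_count, stream):
        # token_count is the list of per-exchange token counts kept in gr.State.
        max_token = MAX_TOKEN_STREAMING if stream else MAX_TOKEN_ALL
        return sum(token_count) > max_token

    print(over_budget([1200, 1400, 1100], stream=True))  # True -> predict() calls reduce_token_size()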
requirements.txt CHANGED
@@ -1,3 +1,7 @@
 gradio
 mdtex2html
 pypinyin
+tiktoken
+socksio
+tqdm
+colorama
utils.py CHANGED
@@ -1,7 +1,4 @@
-"""Contains all of the components that can be used with Gradio Interface / Blocks.
-Along with the docs for each component, you can find the names of example demos that use
-each component. These demos are located in the `demo` directory."""
-
+# -*- coding:utf-8 -*-
 from __future__ import annotations
 from typing import TYPE_CHECKING, Any, Callable, Dict, List, Tuple, Type
 import json
@@ -15,6 +12,9 @@ import csv
 import mdtex2html
 from pypinyin import lazy_pinyin
 from presets import *
+import tiktoken
+from tqdm import tqdm
+import colorama
 
 if TYPE_CHECKING:
     from typing import TypedDict
@@ -48,6 +48,11 @@ def postprocess(
     )
     return y
 
+def count_token(input_str):
+    encoding = tiktoken.encoding_for_model("gpt-3.5-turbo")
+    length = len(encoding.encode(input_str))
+    return length
+
 def parse_text(text):
     lines = text.split("\n")
     lines = [line for line in lines if line != ""]
@@ -92,8 +97,7 @@ def construct_assistant(text):
     return construct_text("assistant", text)
 
 def construct_token_message(token, stream=False):
-    extra = "【仅包含回答的计数】 " if stream else ""
-    return f"{extra}Token 计数: {token}"
+    return f"Token 计数: {token}"
 
 def get_response(openai_api_key, system_prompt, history, temperature, top_p, stream):
     headers = {
@@ -123,11 +127,20 @@ def get_response(openai_api_key, system_prompt, history, temperature, top_p, stream):
 def stream_predict(openai_api_key, system_prompt, history, inputs, chatbot, previous_token_count, top_p, temperature):
     def get_return_value():
         return chatbot, history, status_text, [*previous_token_count, token_counter]
+
+    print("实时回答模式")
     token_counter = 0
     partial_words = ""
     counter = 0
-    status_text = "OK"
+    status_text = "开始实时传输回答……"
     history.append(construct_user(inputs))
+    user_token_count = 0
+    if len(previous_token_count) == 0:
+        system_prompt_token_count = count_token(system_prompt)
+        user_token_count = count_token(inputs) + system_prompt_token_count
+    else:
+        user_token_count = count_token(inputs)
+    print(f"输入token计数: {user_token_count}")
     try:
         response = get_response(openai_api_key, system_prompt, history, temperature, top_p, True)
     except requests.exceptions.ConnectTimeout:
@@ -138,7 +151,7 @@ def stream_predict(openai_api_key, system_prompt, history, inputs, chatbot, previous_token_count, top_p, temperature):
     chatbot.append((parse_text(inputs), ""))
     yield get_return_value()
 
-    for chunk in response.iter_lines():
+    for chunk in tqdm(response.iter_lines()):
         if counter == 0:
             counter += 1
             continue
@@ -151,8 +164,9 @@ def stream_predict(openai_api_key, system_prompt, history, inputs, chatbot, previous_token_count, top_p, temperature):
             # decode each line as response data is in bytes
             if chunklength > 6 and "delta" in chunk['choices'][0]:
                 finish_reason = chunk['choices'][0]['finish_reason']
-                status_text = construct_token_message(sum(previous_token_count)+token_counter, stream=True)
+                status_text = construct_token_message(sum(previous_token_count)+token_counter+user_token_count, stream=True)
                 if finish_reason == "stop":
+                    print("生成完毕")
                     yield get_return_value()
                     break
                 partial_words = partial_words + chunk['choices'][0]["delta"]["content"]
@@ -166,6 +180,7 @@ def stream_predict(openai_api_key, system_prompt, history, inputs, chatbot, previous_token_count, top_p, temperature):
 
 
 def predict_all(openai_api_key, system_prompt, history, inputs, chatbot, previous_token_count, top_p, temperature):
+    print("一次性回答模式")
     history.append(construct_user(inputs))
     try:
         response = get_response(openai_api_key, system_prompt, history, temperature, top_p, False)
@@ -179,22 +194,29 @@ def predict_all(openai_api_key, system_prompt, history, inputs, chatbot, previous_token_count, top_p, temperature):
     total_token_count = response["usage"]["total_tokens"]
     previous_token_count.append(total_token_count - sum(previous_token_count))
     status_text = construct_token_message(total_token_count)
+    print("生成一次性回答完毕")
     return chatbot, history, status_text, previous_token_count
 
 
 def predict(openai_api_key, system_prompt, history, inputs, chatbot, token_count, top_p, temperature, stream=False, should_check_token_count = True):  # repetition_penalty, top_k
+    print("输入为:" + colorama.Fore.BLUE + f"{inputs}" + colorama.Style.RESET_ALL)
     if stream:
+        print("使用流式传输")
         iter = stream_predict(openai_api_key, system_prompt, history, inputs, chatbot, token_count, top_p, temperature)
         for chatbot, history, status_text, token_count in iter:
            yield chatbot, history, status_text, token_count
     else:
+        print("不使用流式传输")
        chatbot, history, status_text, token_count = predict_all(openai_api_key, system_prompt, history, inputs, chatbot, token_count, top_p, temperature)
        yield chatbot, history, status_text, token_count
+    print(f"传输完毕。当前token计数为{token_count}")
+    print("回答为:" + colorama.Fore.BLUE + f"{history[-1]['content']}" + colorama.Style.RESET_ALL)
    if stream:
        max_token = max_token_streaming
    else:
        max_token = max_token_all
    if sum(token_count) > max_token and should_check_token_count:
+        print(f"精简token中{token_count}/{max_token}")
        iter = reduce_token_size(openai_api_key, system_prompt, history, chatbot, token_count, top_p, temperature, stream=False, hidden=True)
        for chatbot, history, status_text, token_count in iter:
            status_text = f"Token 达到上限,已自动降低Token计数至 {status_text}"
@@ -202,6 +224,7 @@ def predict(openai_api_key, system_prompt, history, inputs, chatbot, token_count, top_p, temperature, stream=False, should_check_token_count = True):
 
 
 def retry(openai_api_key, system_prompt, history, chatbot, token_count, top_p, temperature, stream=False):
+    print("重试中……")
     if len(history) == 0:
         yield chatbot, history, f"{standard_error_msg}上下文是空的", token_count
         return
@@ -209,11 +232,13 @@ def retry(openai_api_key, system_prompt, history, chatbot, token_count, top_p, temperature, stream=False):
     inputs = history.pop()["content"]
     token_count.pop()
     iter = predict(openai_api_key, system_prompt, history, inputs, chatbot, token_count, top_p, temperature, stream=stream)
+    print("重试完毕")
     for x in iter:
         yield x
 
 
 def reduce_token_size(openai_api_key, system_prompt, history, chatbot, token_count, top_p, temperature, stream=False, hidden=False):
+    print("开始减少token数量……")
     iter = predict(openai_api_key, system_prompt, history, summarize_prompt, chatbot, token_count, top_p, temperature, stream=stream, should_check_token_count=False)
     for chatbot, history, status_text, previous_token_count in iter:
         history = history[-2:]
@@ -221,23 +246,29 @@ def reduce_token_size(openai_api_key, system_prompt, history, chatbot, token_count, top_p, temperature, stream=False, hidden=False):
     if hidden:
         chatbot.pop()
     yield chatbot, history, construct_token_message(sum(token_count), stream=stream), token_count
+    print("减少token数量完毕")
 
 
 def delete_last_conversation(chatbot, history, previous_token_count, streaming):
     if len(chatbot) > 0 and standard_error_msg in chatbot[-1][1]:
+        print("由于包含报错信息,只删除chatbot记录")
         chatbot.pop()
         return chatbot, history
     if len(history) > 0:
+        print("删除了一组对话历史")
         history.pop()
         history.pop()
     if len(chatbot) > 0:
+        print("删除了一组chatbot对话")
         chatbot.pop()
     if len(previous_token_count) > 0:
+        print("删除了一组对话的token计数记录")
         previous_token_count.pop()
     return chatbot, history, previous_token_count, construct_token_message(sum(previous_token_count), streaming)
 
 
 def save_chat_history(filename, system, history, chatbot):
+    print("保存对话历史中……")
     if filename == "":
         return
     if not filename.endswith(".json"):
@@ -247,30 +278,39 @@ def save_chat_history(filename, system, history, chatbot):
     print(json_s)
     with open(os.path.join(HISTORY_DIR, filename), "w") as f:
         json.dump(json_s, f)
+    print("保存对话历史完毕")
 
 
 def load_chat_history(filename, system, history, chatbot):
+    print("加载对话历史中……")
     try:
         with open(os.path.join(HISTORY_DIR, filename), "r") as f:
             json_s = json.load(f)
-        if type(json_s["history"]) == list:
-            new_history = []
-            for index, item in enumerate(json_s["history"]):
-                if index % 2 == 0:
-                    new_history.append(construct_user(item))
-                else:
-                    new_history.append(construct_assistant(item))
-            json_s["history"] = new_history
+        try:
+            if type(json_s["history"][0]) == str:
+                print("历史记录格式为旧版,正在转换……")
+                new_history = []
+                for index, item in enumerate(json_s["history"]):
+                    if index % 2 == 0:
+                        new_history.append(construct_user(item))
+                    else:
+                        new_history.append(construct_assistant(item))
+                json_s["history"] = new_history
+                print(new_history)
+        except:
+            # no chat history to convert
+            pass
+        print("加载对话历史完毕")
         return filename, json_s["system"], json_s["history"], json_s["chatbot"]
     except FileNotFoundError:
-        print("File not found.")
+        print("没有找到对话历史文件,不执行任何操作")
         return filename, system, history, chatbot
 
 def sorted_by_pinyin(list):
     return sorted(list, key=lambda char: lazy_pinyin(char)[0][0])
 
 def get_file_names(dir, plain=False, filetypes=[".json"]):
-    # find all json files in the current directory and return their names
+    print(f"获取文件名列表,目录为{dir},文件类型为{filetypes},是否为纯文本列表{plain}")
     files = []
     try:
         for type in filetypes:
@@ -286,9 +326,11 @@ def get_file_names(dir, plain=False, filetypes=[".json"]):
     return gr.Dropdown.update(choices=files)
 
 def get_history_names(plain=False):
+    print("获取历史记录文件名列表")
     return get_file_names(HISTORY_DIR, plain)
 
 def load_template(filename, mode=0):
+    print(f"加载模板文件{filename},模式为{mode}(0为返回字典和下拉菜单,1为返回下拉菜单,2为返回字典)")
     lines = []
     print("Loading template...")
     if filename.endswith(".json"):
@@ -309,24 +351,19 @@ def load_template(filename, mode=0):
     return {row[0]:row[1] for row in lines}, gr.Dropdown.update(choices=choices, value=choices[0])
 
 def get_template_names(plain=False):
+    print("获取模板文件名列表")
     return get_file_names(TEMPLATES_DIR, plain, filetypes=[".csv", "json"])
 
 def get_template_content(templates, selection, original_system_prompt):
+    print(f"应用模板中,选择为{selection},原始系统提示为{original_system_prompt}")
     try:
         return templates[selection]
     except:
         return original_system_prompt
 
 def reset_state():
+    print("重置状态")
     return [], [], [], construct_token_message(0)
 
-def compose_system(system_prompt):
-    return {"role": "system", "content": system_prompt}
-
-
-def compose_user(user_input):
-    return {"role": "user", "content": user_input}
-
-
 def reset_textbox():
     return gr.update(value='')
 
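The count_token helper added above drives the new input-side token counts in stream_predict. A minimal sketch of the tiktoken pattern it uses (the sample string is illustrative):

    import tiktoken

    def count_token(input_str):
        # Resolve the BPE tokenizer that gpt-3.5-turbo uses (cl100k_base)
        # and count how many tokens the string encodes to.
        encoding = tiktoken.encoding_for_model("gpt-3.5-turbo")
        return len(encoding.encode(input_str))

    print(count_token("你好,世界"))  # exact count depends on the tokenizer data

Note that encoding_for_model re-resolves the tokenizer on every call; caching the encoding object once at module level would avoid the repeated lookup.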
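stream_predict wraps response.iter_lines() in tqdm, so every server-sent line received from the API advances a console progress counter while the delta content accumulates. A self-contained sketch of that streaming pattern against the chat completions endpoint this code targets (payload fields are inferred from the diff, not copied from it):

    import json
    import requests
    from tqdm import tqdm

    def stream_chat(api_key, messages):
        response = requests.post(
            "https://api.openai.com/v1/chat/completions",
            headers={"Authorization": f"Bearer {api_key}"},
            json={"model": "gpt-3.5-turbo", "messages": messages, "stream": True},
            stream=True,  # keep the connection open and read server-sent events
        )
        for line in tqdm(response.iter_lines()):
            if not line:
                continue  # skip SSE keep-alive blank lines
            line = line.decode()
            if line == "data: [DONE]":
                break
            chunk = json.loads(line[len("data: "):])  # strip the "data: " prefix
            delta = chunk["choices"][0].get("delta", {})
            if "content" in delta:
                yield delta["content"]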
 
 
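load_chat_history now detects the pre-commit on-disk format, where "history" was a flat list of strings alternating user and assistant turns, and upgrades it to role/content message dicts. The conversion, reduced to a standalone function (the name is illustrative):

    def convert_legacy_history(old_history):
        # Old format: ["question 1", "answer 1", "question 2", ...]
        # New format: [{"role": "user", ...}, {"role": "assistant", ...}, ...]
        new_history = []
        for index, item in enumerate(old_history):
            role = "user" if index % 2 == 0 else "assistant"
            new_history.append({"role": role, "content": item})
        return new_history

    assert convert_legacy_history(["hi", "hello"]) == [
        {"role": "user", "content": "hi"},
        {"role": "assistant", "content": "hello"},
    ]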