Not-Grim-Refer committed on
Commit
c491e45
1 Parent(s): 62d2e21

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -18
app.py CHANGED
@@ -11,16 +11,12 @@ with open('./cookies.json', 'r') as f:
11
 
12
  # Async function to get model reply
13
  async def get_model_reply(prompt, style, context=[]):
14
- # Combines the new question with a previous context
15
  context += [prompt]
16
- # Initialize chatbot with cookies
17
  bot = Chatbot(cookies=cookies)
18
- # Limit the context to 4096 characters
19
  prompt2 = '\n\n'.join(context)[:4096]
20
  raw_data = await bot.ask(prompt2, conversation_style=style)
21
  await bot.close()
22
 
23
- # Try to get response from raw data
24
  try:
25
  try:
26
  response = raw_data["item"]["messages"][1]["text"]
@@ -29,19 +25,16 @@ async def get_model_reply(prompt, style, context=[]):
29
  response = re.sub(r'\^', '', response)
30
  response = response.rstrip()
31
  context += [response]
32
- # List of (user, bot) responses
33
  responses = [(u, b) for u, b in zip(context[::2], context[1::2])]
34
  return responses, context
35
  except:
36
  try:
37
- # Check if conversation limit is reached
38
  if raw_data["item"]["throttling"]["numUserMessagesInConversation"] > raw_data["item"]["throttling"]["maxNumUserMessagesInConversation"]:
39
  response = "> **Oops, I think we've reached the end of this conversation. Please reset the bot!**"
40
  context += [response]
41
  responses = [(u, b) for u, b in zip(context[::2], context[1::2])]
42
  return responses, context
43
  except:
44
- # Check if user is throttled
45
  if raw_data["item"]["result"]["value"] == "Throttled":
46
  response = "> **Error: We're sorry, but you've reached the maximum number of messages you can send to Bing in a 24-hour period. Check back later!**"
47
  context += [response]
@@ -49,13 +42,10 @@ async def get_model_reply(prompt, style, context=[]):
49
  return responses, context
50
 
51
  # Function to send user input to model and get reply
52
- def send(inputs, style, state):
53
- # Run async function and get responses
54
  responses, context = asyncio.run(get_model_reply(inputs, style, state))
55
- # Append user input and bot response to chat interface
56
  chatbot.append_message("user", inputs)
57
  chatbot.append_message("bot", responses[-1][1])
58
- # Update state
59
  state.append(responses)
60
 
61
  # Define Gradio interface
@@ -65,17 +55,12 @@ with gr.Blocks() as dialog_app:
65
  state = gr.State([])
66
  markdown = gr.Markdown(label="Output")
67
 
68
- # Input fields
69
  with gr.Row():
70
  inputs = gr.Textbox(label="Enter question", placeholder="Enter text and press enter")
71
  style = gr.Dropdown(label="Answer tendency", choices=["creative", "balanced", "precise"], multiselect=False, value="balanced", type="value")
72
 
73
- # Set 'send' as submit function for input fields
74
- inputs.submit(send, [inputs, style, state])
75
  send = gr.Button("Send")
76
- # Set 'send' as click function for 'Send' button
77
- send.click(send, [inputs, style, state])
78
 
79
- # Launch Gradio app
80
  dialog_app.launch()
81
-
 
11
 
12
  # Async function to get model reply
13
  async def get_model_reply(prompt, style, context=[]):
 
14
  context += [prompt]
 
15
  bot = Chatbot(cookies=cookies)
 
16
  prompt2 = '\n\n'.join(context)[:4096]
17
  raw_data = await bot.ask(prompt2, conversation_style=style)
18
  await bot.close()
19
 
 
20
  try:
21
  try:
22
  response = raw_data["item"]["messages"][1]["text"]
 
25
  response = re.sub(r'\^', '', response)
26
  response = response.rstrip()
27
  context += [response]
 
28
  responses = [(u, b) for u, b in zip(context[::2], context[1::2])]
29
  return responses, context
30
  except:
31
  try:
 
32
  if raw_data["item"]["throttling"]["numUserMessagesInConversation"] > raw_data["item"]["throttling"]["maxNumUserMessagesInConversation"]:
33
  response = "> **Oops, I think we've reached the end of this conversation. Please reset the bot!**"
34
  context += [response]
35
  responses = [(u, b) for u, b in zip(context[::2], context[1::2])]
36
  return responses, context
37
  except:
 
38
  if raw_data["item"]["result"]["value"] == "Throttled":
39
  response = "> **Error: We're sorry, but you've reached the maximum number of messages you can send to Bing in a 24-hour period. Check back later!**"
40
  context += [response]
 
42
  return responses, context
43
 
44
# Function to send user input to model and get reply
def get_reply(inputs, style, state):
    """Run the async model call for *inputs* and record the exchange.

    Parameters:
        inputs: the user's question (str).
        style: conversation style passed through to the bot
            ("creative" | "balanced" | "precise").
        state: mutable chat context shared across calls (the gr.State value).

    Side effects: appends both turns to the `chatbot` widget and appends
    the full responses list to *state*. Returns None.
    """
    # Bridge the synchronous Gradio callback to the async model call.
    # The returned context is unused here (state is mutated in place by
    # get_model_reply), so discard it instead of binding a dead local.
    responses, _ = asyncio.run(get_model_reply(inputs, style, state))
    # NOTE(review): `chatbot` is not defined in the visible part of this
    # file (the UI section defines `markdown`, not `chatbot`) -- confirm it
    # exists elsewhere, otherwise these two calls raise NameError at runtime.
    chatbot.append_message("user", inputs)
    chatbot.append_message("bot", responses[-1][1])
    # NOTE(review): this appends the entire responses list as a single
    # element of state; `state.extend(responses)` may be the intent --
    # confirm against how state is consumed.
    state.append(responses)
50
 
51
  # Define Gradio interface
 
55
  state = gr.State([])
56
  markdown = gr.Markdown(label="Output")
57
 
 
58
  with gr.Row():
59
  inputs = gr.Textbox(label="Enter question", placeholder="Enter text and press enter")
60
  style = gr.Dropdown(label="Answer tendency", choices=["creative", "balanced", "precise"], multiselect=False, value="balanced", type="value")
61
 
62
+ inputs.submit(get_reply, [inputs, style, state])
 
63
  send = gr.Button("Send")
64
+ send.click(get_reply, [inputs, style, state])
 
65
 
 
66
  dialog_app.launch()