Not-Grim-Refer committed on
Commit 62d2e21
1 Parent(s): 8311a6d

Update app.py

Files changed (1)
  1. app.py +36 -40
app.py CHANGED
@@ -2,25 +2,25 @@ import gradio as gr
  import json
  import asyncio
  import os
- from EdgeGPT import Chatbot, ConversationStyle
  import re
+ from EdgeGPT import Chatbot, ConversationStyle

  # Read cookie from local file
  with open('./cookies.json', 'r') as f:
      cookies = json.load(f)
- # If you choose to read the cookie.json file in the repository, you don't need to pass the cookies parameter to the function again
- # You can also delete the code corresponding to the gr.Tab("Cookies"): interface
+
+ # Async function to get model reply
  async def get_model_reply(prompt, style, context=[]):
      # Combines the new question with a previous context
      context += [prompt]
-     # cookies = json.loads(cookies)
-     # Given the most recent context (4096 characters)
-     # Continue the text up to 2048 tokens ~ 8192 characters
+     # Initialize chatbot with cookies
      bot = Chatbot(cookies=cookies)
+     # Limit the context to 4096 characters
      prompt2 = '\n\n'.join(context)[:4096]
      raw_data = await bot.ask(prompt2, conversation_style=style)
      await bot.close()
-     # print(raw_data)
+
+     # Try to get response from raw data
      try:
          try:
              response = raw_data["item"]["messages"][1]["text"]
@@ -29,57 +29,53 @@ async def get_model_reply(prompt, style, context=[]):
          response = re.sub(r'\^', '', response)
          response = response.rstrip()
          context += [response]
-
-         # List of (user, bot) responses. We will use this format later
+         # List of (user, bot) responses
          responses = [(u, b) for u, b in zip(context[::2], context[1::2])]
          return responses, context
      except:
          try:
+             # Check if conversation limit is reached
              if raw_data["item"]["throttling"]["numUserMessagesInConversation"] > raw_data["item"]["throttling"]["maxNumUserMessagesInConversation"]:
                  response = "> **Oops, I think we've reached the end of this conversation. Please reset the bot!**"
                  context += [response]
-
-                 # List of (user, bot) responses. We will use this format later
                  responses = [(u, b) for u, b in zip(context[::2], context[1::2])]
                  return responses, context
-
          except:
+             # Check if user is throttled
              if raw_data["item"]["result"]["value"] == "Throttled":
                  response = "> **Error: We're sorry, but you've reached the maximum number of messages you can send to Bing in a 24-hour period. Check back later!**"
                  context += [response]
-
-                 # List of (user, bot) responses. We will use this format later
                  responses = [(u, b) for u, b in zip(context[::2], context[1::2])]
                  return responses, context

- # query = 'Which is the largest country by area in the world?'
- # style="precise"
- # responses, context = asyncio.run(get_model_reply(query, style, context=[]))
- #
- # print(' ' + responses[-1][0])
- # print(' ' + responses[-1][1])
+ # Function to send user input to model and get reply
+ def send(inputs, style, state):
+     # Run async function and get responses
+     responses, context = asyncio.run(get_model_reply(inputs, style, state))
+     # Append user input and bot response to chat interface
+     chatbot.append_message("user", inputs)
+     chatbot.append_message("bot", responses[-1][1])
+     # Update state
+     state.append(responses)
+
+ # Define Gradio interface
  with gr.Blocks() as dialog_app:
-     # with gr.Tab("Cookies"):
-     # cookies = gr.Textbox(lines=2, label="Enter cookies from bing.com")
-     # with gr.Tab("New Bing Chat"):
-     gr.Markdown("# A Simple Web to use New Bing Without Magic")
-     chatbot = gr.Chatbot()
-     state = gr.State([])
-     markdown = gr.Markdown(label="Output")
+     gr.Markdown("# A Simple Web to use New Bing Without Magic")
+     chatbot = gr.Chatbot()
+     state = gr.State([])
+     markdown = gr.Markdown(label="Output")

-     with gr.Row():
-         inputs = gr.Textbox(
-             label="Enter question",
-             placeholder="Enter text and press enter"
-         )
-         style = gr.Dropdown(label="Answer tendency", choices=["creative", "balanced", "precise"], multiselect=False,
-                             value="balanced", type="value")
+     # Input fields
+     with gr.Row():
+         inputs = gr.Textbox(label="Enter question", placeholder="Enter text and press enter")
+         style = gr.Dropdown(label="Answer tendency", choices=["creative", "balanced", "precise"], multiselect=False, value="balanced", type="value")

-     inputs.submit(get_model_reply, [inputs, style, state], [chatbot, state])
-     send = gr.Button("Send")
-     send.click(get_model_reply, [inputs, style, state], [chatbot, state])
+     # Set 'send' as submit function for input fields
+     inputs.submit(send, [inputs, style, state])
+     send = gr.Button("Send")
+     # Set 'send' as click function for 'Send' button
+     send.click(send, [inputs, style, state])

- # Launches the app in a new local port
+ # Launch Gradio app
  dialog_app.launch()
- # Set a password for the website to prevent abuse
- # dialog_app.launch(auth=("admin", "pass1234"))
+
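
For context on the cookie handling both versions share: `Chatbot(cookies=cookies)` is fed the JSON export of a logged-in bing.com session (the removed comments refer to the same data via the old gr.Tab("Cookies") textbox, "Enter cookies from bing.com"). A minimal sketch of loading and sanity-checking that file, assuming the usual cookie-export shape of a list of objects with at least "name" and "value" keys (the exact fields depend on the export tool):

    import json

    # Load the exported bing.com cookies; the expected shape is an assumption:
    # a list of {"name": ..., "value": ...} objects from a browser cookie exporter.
    with open('./cookies.json', 'r') as f:
        cookies = json.load(f)

    if not isinstance(cookies, list) or not all('name' in c and 'value' in c for c in cookies):
        raise ValueError('cookies.json does not look like an exported cookie list')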
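
The old version also carried a commented-out example of calling `get_model_reply` directly, which this commit deletes along with the other commented-out code. Restated as a runnable sketch, assuming it is placed inside app.py before `dialog_app.launch()` with a valid `cookies.json` and the EdgeGPT package installed; the query text and style value are the illustrative ones from those comments:

    import asyncio

    # Standalone call to get_model_reply, mirroring the removed commented-out example
    query = 'Which is the largest country by area in the world?'
    style = "precise"
    responses, context = asyncio.run(get_model_reply(query, style, context=[]))

    print(' ' + responses[-1][0])  # the question
    print(' ' + responses[-1][1])  # the model's reply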