Spaces:
Runtime error
Runtime error
Not-Grim-Refer
committed on
Commit
•
62d2e21
1
Parent(s):
8311a6d
Update app.py
Browse files
app.py
CHANGED
@@ -2,25 +2,25 @@ import gradio as gr
|
|
2 |
import json
|
3 |
import asyncio
|
4 |
import os
|
5 |
-
from EdgeGPT import Chatbot, ConversationStyle
|
6 |
import re
|
|
|
7 |
|
8 |
# Read cookie from local file
|
9 |
with open('./cookies.json', 'r') as f:
|
10 |
cookies = json.load(f)
|
11 |
-
|
12 |
-
#
|
13 |
async def get_model_reply(prompt, style, context=[]):
|
14 |
# Combines the new question with a previous context
|
15 |
context += [prompt]
|
16 |
-
#
|
17 |
-
# Given the most recent context (4096 characters)
|
18 |
-
# Continue the text up to 2048 tokens ~ 8192 characters
|
19 |
bot = Chatbot(cookies=cookies)
|
|
|
20 |
prompt2 = '\n\n'.join(context)[:4096]
|
21 |
raw_data = await bot.ask(prompt2, conversation_style=style)
|
22 |
await bot.close()
|
23 |
-
|
|
|
24 |
try:
|
25 |
try:
|
26 |
response = raw_data["item"]["messages"][1]["text"]
|
@@ -29,57 +29,53 @@ async def get_model_reply(prompt, style, context=[]):
|
|
29 |
response = re.sub(r'\^', '', response)
|
30 |
response = response.rstrip()
|
31 |
context += [response]
|
32 |
-
|
33 |
-
# List of (user, bot) responses. We will use this format later
|
34 |
responses = [(u, b) for u, b in zip(context[::2], context[1::2])]
|
35 |
return responses, context
|
36 |
except:
|
37 |
try:
|
|
|
38 |
if raw_data["item"]["throttling"]["numUserMessagesInConversation"] > raw_data["item"]["throttling"]["maxNumUserMessagesInConversation"]:
|
39 |
response = "> **Oops, I think we've reached the end of this conversation. Please reset the bot!**"
|
40 |
context += [response]
|
41 |
-
|
42 |
-
# List of (user, bot) responses. We will use this format later
|
43 |
responses = [(u, b) for u, b in zip(context[::2], context[1::2])]
|
44 |
return responses, context
|
45 |
-
|
46 |
except:
|
|
|
47 |
if raw_data["item"]["result"]["value"] == "Throttled":
|
48 |
response = "> **Error: We're sorry, but you've reached the maximum number of messages you can send to Bing in a 24-hour period. Check back later!**"
|
49 |
context += [response]
|
50 |
-
|
51 |
-
# List of (user, bot) responses. We will use this format later
|
52 |
responses = [(u, b) for u, b in zip(context[::2], context[1::2])]
|
53 |
return responses, context
|
54 |
|
55 |
-
#
|
56 |
-
|
57 |
-
#
|
58 |
-
|
59 |
-
#
|
60 |
-
|
|
|
|
|
|
|
|
|
|
|
61 |
with gr.Blocks() as dialog_app:
|
62 |
-
|
63 |
-
|
64 |
-
|
65 |
-
|
66 |
-
chatbot = gr.Chatbot()
|
67 |
-
state = gr.State([])
|
68 |
-
markdown = gr.Markdown(label="Output")
|
69 |
|
70 |
-
|
71 |
-
|
72 |
-
|
73 |
-
|
74 |
-
)
|
75 |
-
style = gr.Dropdown(label="Answer tendency", choices=["creative", "balanced", "precise"], multiselect=False,
|
76 |
-
value="balanced", type="value")
|
77 |
|
78 |
-
|
79 |
-
|
80 |
-
|
|
|
|
|
81 |
|
82 |
-
#
|
83 |
dialog_app.launch()
|
84 |
-
|
85 |
-
# dialog_app.launch(auth=("admin", "pass1234"))
|
|
|
2 |
import json
|
3 |
import asyncio
|
4 |
import os
|
|
|
5 |
import re
|
6 |
+
from EdgeGPT import Chatbot, ConversationStyle
|
7 |
|
8 |
# Read cookie from local file
|
9 |
with open('./cookies.json', 'r') as f:
|
10 |
cookies = json.load(f)
|
11 |
+
|
12 |
+
# Async function to get model reply
|
13 |
async def get_model_reply(prompt, style, context=[]):
|
14 |
# Combines the new question with a previous context
|
15 |
context += [prompt]
|
16 |
+
# Initialize chatbot with cookies
|
|
|
|
|
17 |
bot = Chatbot(cookies=cookies)
|
18 |
+
# Limit the context to 4096 characters
|
19 |
prompt2 = '\n\n'.join(context)[:4096]
|
20 |
raw_data = await bot.ask(prompt2, conversation_style=style)
|
21 |
await bot.close()
|
22 |
+
|
23 |
+
# Try to get response from raw data
|
24 |
try:
|
25 |
try:
|
26 |
response = raw_data["item"]["messages"][1]["text"]
|
|
|
29 |
response = re.sub(r'\^', '', response)
|
30 |
response = response.rstrip()
|
31 |
context += [response]
|
32 |
+
# List of (user, bot) responses
|
|
|
33 |
responses = [(u, b) for u, b in zip(context[::2], context[1::2])]
|
34 |
return responses, context
|
35 |
except:
|
36 |
try:
|
37 |
+
# Check if conversation limit is reached
|
38 |
if raw_data["item"]["throttling"]["numUserMessagesInConversation"] > raw_data["item"]["throttling"]["maxNumUserMessagesInConversation"]:
|
39 |
response = "> **Oops, I think we've reached the end of this conversation. Please reset the bot!**"
|
40 |
context += [response]
|
|
|
|
|
41 |
responses = [(u, b) for u, b in zip(context[::2], context[1::2])]
|
42 |
return responses, context
|
|
|
43 |
except:
|
44 |
+
# Check if user is throttled
|
45 |
if raw_data["item"]["result"]["value"] == "Throttled":
|
46 |
response = "> **Error: We're sorry, but you've reached the maximum number of messages you can send to Bing in a 24-hour period. Check back later!**"
|
47 |
context += [response]
|
|
|
|
|
48 |
responses = [(u, b) for u, b in zip(context[::2], context[1::2])]
|
49 |
return responses, context
|
50 |
|
51 |
+
# Function to send user input to model and get reply
|
52 |
+
def send(inputs, style, state):
|
53 |
+
# Run async function and get responses
|
54 |
+
responses, context = asyncio.run(get_model_reply(inputs, style, state))
|
55 |
+
# Append user input and bot response to chat interface
|
56 |
+
chatbot.append_message("user", inputs)
|
57 |
+
chatbot.append_message("bot", responses[-1][1])
|
58 |
+
# Update state
|
59 |
+
state.append(responses)
|
60 |
+
|
61 |
+
# Define Gradio interface
|
62 |
with gr.Blocks() as dialog_app:
|
63 |
+
gr.Markdown("# A Simple Web to use New Bing Without Magic")
|
64 |
+
chatbot = gr.Chatbot()
|
65 |
+
state = gr.State([])
|
66 |
+
markdown = gr.Markdown(label="Output")
|
|
|
|
|
|
|
67 |
|
68 |
+
# Input fields
|
69 |
+
with gr.Row():
|
70 |
+
inputs = gr.Textbox(label="Enter question", placeholder="Enter text and press enter")
|
71 |
+
style = gr.Dropdown(label="Answer tendency", choices=["creative", "balanced", "precise"], multiselect=False, value="balanced", type="value")
|
|
|
|
|
|
|
72 |
|
73 |
+
# Set 'send' as submit function for input fields
|
74 |
+
inputs.submit(send, [inputs, style, state])
|
75 |
+
send = gr.Button("Send")
|
76 |
+
# Set 'send' as click function for 'Send' button
|
77 |
+
send.click(send, [inputs, style, state])
|
78 |
|
79 |
+
# Launch Gradio app
|
80 |
dialog_app.launch()
|
81 |
+
|
|