yizhangliu committed
Commit bed83d4
1 Parent(s): d586c92

Update app.py
app.py CHANGED
@@ -5,6 +5,7 @@ import os, sys, json
 from loguru import logger
 import paddlehub as hub
 import random
+from encoder import get_encoder
 
 openai.api_key = os.getenv("OPENAI_API_KEY")
 
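The new import pulls in a local encoder module, presumably the GPT-2 BPE tokenizer that such Spaces usually vendor as encoder.py; the commit only shows that it exposes a no-argument get_encoder() whose result has encode and decode. A minimal sketch of that assumed interface, as app.py uses it below:

# Sketch of the assumed tokenizer interface; only get_encoder(), encode() and
# decode() are visible in this commit, everything else here is an assumption.
from encoder import get_encoder

enc = get_encoder()
ids = enc.encode("Talk to chatGPT")   # list of BPE token ids
print(len(ids))                       # the token count used for prompt budgeting
print(enc.decode(ids))                # decodes back to the original text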
@@ -56,23 +57,31 @@ def get_response_from_chatgpt(api, text):
         response = "Openai said: I'm so tired. Let me lie down for a few days. If you like, you can visit my home(2)."
     return response
 
+token_encoder = get_encoder()
+total_tokens = 4096
+max_output_tokens = 1024
+max_input_tokens = total_tokens - max_output_tokens
+
 def get_response_from_openai(input, history):
     def openai_create(prompt):
         # no chatgpt, and from gpt-3
-        # (previous openai_create body: 14 removed lines not visible in this view)
+        try:
+            response = openai.Completion.create(
+                model="text-davinci-003",
+                prompt=prompt,
+                temperature=0.9,
+                max_tokens=max_output_tokens,
+                top_p=1,
+                frequency_penalty=0,
+                presence_penalty=0.6,
+                stop=[" Human:", " AI:"]
+            )
+            ret = response.choices[0].text
+            if ret == '':
+                ret = "Openai said: I'm too tired. Let me lie down for a few days. If you like, you can visit my home(3)."
+        except Exception as e:
+            ret = "Openai said: I'm too tired. Let me lie down for a few days. If you like, you can visit my home(4)."
+
         return ret
 
     history = history or []
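The constants above reserve 1024 of text-davinci-003's 4096-token context window for the completion, leaving 3072 tokens for the prompt, and the new try/except turns an API error or an empty completion into a canned reply instead of crashing the Space. A standalone sketch of the same call pattern, assuming the legacy pre-1.0 openai-python client and OPENAI_API_KEY set in the environment:

import os
import openai

openai.api_key = os.getenv("OPENAI_API_KEY")

TOTAL_TOKENS = 4096        # text-davinci-003 context window
MAX_OUTPUT_TOKENS = 1024   # reserved for the completion
MAX_INPUT_TOKENS = TOTAL_TOKENS - MAX_OUTPUT_TOKENS   # 3072 tokens left for the prompt

def complete(prompt):
    # Mirrors the call added in this hunk (legacy Completions API); an empty
    # completion or an exception falls back to a fixed reply, as app.py does.
    fallback = "Openai said: I'm too tired. Let me lie down for a few days."
    try:
        resp = openai.Completion.create(
            model="text-davinci-003",
            prompt=prompt,
            temperature=0.9,
            max_tokens=MAX_OUTPUT_TOKENS,
            top_p=1,
            frequency_penalty=0,
            presence_penalty=0.6,
            stop=[" Human:", " AI:"],
        )
        return resp.choices[0].text or fallback
    except Exception:
        return fallback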
@@ -80,6 +89,15 @@ def get_response_from_openai(input, history):
     s = list(sum(his, ()))
     s.append(input)
     inp = ' '.join(s)
+    tokens = token_encoder.encode(inp)
+    if len(tokens) > max_input_tokens:
+        new_tokens = tokens[-max_input_tokens:]
+        inp = token_encoder.decode(new_tokens)
+        # tokens_1 = token_encoder.encode(inp)
+        # logger.info(f"tokens_len[1]__{len(tokens)}__{len(new_tokens)}__{len(tokens_1)}")
+    # else:
+    #     logger.info(f"tokens_len[0]__{len(tokens)}")
+
     output = openai_create(inp)
     return output
 
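The hunk above keeps only the last max_input_tokens BPE tokens of the flattened history, so the prompt plus the 1024 reserved output tokens always fits the 4096-token window and the newest turns survive truncation. The same trimming step as a small helper, assuming the encoder interface sketched earlier:

def trim_to_budget(text, enc, max_input_tokens=3072):
    # Encode, keep only the trailing token budget, then decode back to text,
    # so the oldest part of the conversation is dropped first.
    tokens = enc.encode(text)
    if len(tokens) <= max_input_tokens:
        return text
    return enc.decode(tokens[-max_input_tokens:])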
@@ -326,8 +344,6 @@ def chat(api, input0, input1, chat_radio, chat_history):
     # logger.info(f"out_chat_: {len(out_chat)} / {chat_radio}")
     if chat_radio == "Talk to chatGPT":
         # response = get_response_from_chatgpt(api, input0)
-        # response = get_response_from_microsoft(input0)
-        # response = get_response_from_skywork(input0)
         response = get_response_from_openai(input0, out_chat)
         out_chat.append((input0, response))
         # logger.info(f'liuyz_5___{out_chat}__')
@@ -376,4 +392,4 @@ with gr.Blocks(title='Talk to chatGPT') as demo:
     with gr.Row(elem_id='tab_img', visible=False).style(height=5):
         tab_img = gr.TabbedInterface(tab_actions, tab_titles)
 
-demo.launch(debug = True)
+demo.launch(debug = True)
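Taken together, the updated get_response_from_openai flattens the (user, bot) history pairs into one space-joined prompt, trims it to the input budget, and sends it to text-davinci-003 with a canned fallback on failure. A rough end-to-end sketch of that flow; reply, trim_to_budget, and complete are names from the sketches above, not from app.py:

from encoder import get_encoder

def reply(user_input, history):
    # history is a list of (user, bot) tuples, as built by out_chat.append(...)
    enc = get_encoder()
    flat = list(sum(history, ()))         # [(u1, b1), (u2, b2)] -> [u1, b1, u2, b2]
    flat.append(user_input)
    prompt = ' '.join(flat)
    prompt = trim_to_budget(prompt, enc)  # keep only the newest 3072 tokens (helper above)
    return complete(prompt)               # davinci-003 call with fallback (helper above)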