add proxy debug function
- check_proxy.py +25 -0
- config.py +9 -1
- functional.py +3 -11
- main.py +5 -4
- predict.py +42 -36
check_proxy.py
ADDED
@@ -0,0 +1,25 @@
+
+"""
+我:用python的requests库查询本机ip地址所在地
+ChatGPT:
+"""
+def check_proxy(proxies):
+    import requests
+    proxies_https = proxies['https'] if proxies is not None else '无'
+    try:
+        response = requests.get("https://ipapi.co/json/", proxies=proxies, timeout=4)
+        data = response.json()
+        country = data['country_name']
+        # city = data['city']
+        result = f"代理配置 {proxies_https}, 代理所在地:{country}"
+        print(result)
+        return result
+    except:
+        result = f"代理配置 {proxies_https}, 代理所在地查询超时,代理可能无效"
+        print(result)
+        return result
+
+
+if __name__ == '__main__':
+    from config import proxies
+    check_proxy(proxies)
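A quick sketch of how the new helper can be exercised on its own; both calls are hypothetical usage (not part of the commit), and the socks5h address is just the placeholder from config.py.

from check_proxy import check_proxy

# Without a proxy the lookup is made directly and reports the country of the local IP.
check_proxy(None)

# With a proxy dict (placeholder address from config.py); if the proxy is unreachable,
# the 4-second timeout triggers and the fallback "查询超时" message is returned instead.
check_proxy({"http": "socks5h://localhost:11284", "https": "socks5h://localhost:11284"})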
config.py
CHANGED
@@ -2,14 +2,22 @@
 API_KEY = "sk-此处填API秘钥"
 API_URL = "https://api.openai.com/v1/chat/completions"
 
+# 改为True应用代理
 USE_PROXY = False
 if USE_PROXY:
+    # 代理网络的地址,打开你的科学上网软件查看代理的协议(socks5/http)、地址(localhost)和端口(11284)
     proxies = { "http": "socks5h://localhost:11284", "https": "socks5h://localhost:11284", }
     print('网络代理状态:运行。')
 else:
     proxies = None
     print('网络代理状态:未配置。无代理状态下很可能无法访问。')
 
-#
+# 发送请求到OpenAI后,等待多久判定为超时
+TIMEOUT_SECONDS = 20
+
+# 网页的端口, -1代表随机端口
+WEB_PORT = -1
+
+# 检查一下是不是忘了改config
 if API_KEY == "sk-此处填API秘钥":
     assert False, "请在config文件中修改API密钥, 添加海外代理之后再运行"
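predict.py below imports from config_private.py when that file exists, so real keys and proxy settings can live outside version control; a minimal sketch of such an override, assuming it only needs to define the names predict.py imports (all values here are placeholders).

# config_private.py (hypothetical local override, not part of the commit)
API_KEY = "sk-xxxxxxxx"  # your real key, never committed
API_URL = "https://api.openai.com/v1/chat/completions"
proxies = {"http": "socks5h://localhost:11284", "https": "socks5h://localhost:11284"}
TIMEOUT_SECONDS = 20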
functional.py
CHANGED
@@ -10,19 +10,16 @@ def get_functionals():
     "英语学术润色": {
         "Prefix": "Below is a paragraph from an academic paper. Polish the writing to meet the academic style, \
 improve the spelling, grammar, clarity, concision and overall readability. When neccessary, rewrite the whole sentence. \
-Furthermore, list all modification and explain the reasons to do so in markdown table.\n\n",
-        "Button": None,
-        "Suffix": "",
-        "Color": "stop",
+Furthermore, list all modification and explain the reasons to do so in markdown table.\n\n",  # 前言
+        "Suffix": "",  # 后语
+        "Color": "stop",  # 按钮颜色
     },
     "中文学术润色": {
         "Prefix": "作为一名中文学术论文写作改进助理,你的任务是改进所提供文本的拼写、语法、清晰、简洁和整体可读性,同时分解长句,减少重复,并提供改进建议。请只提供文本的更正版本,避免包括解释。请编辑以下文本:\n\n",
-        "Button": None,
         "Suffix": "",
     },
     "查找语法错误": {
         "Prefix": "Below is a paragraph from an academic paper. Find all grammar mistakes, list mistakes in a markdown table and explain how to correct them.\n\n",
-        "Button": None,
         "Suffix": "",
     },
     "中英互译": {
@@ -37,28 +34,23 @@ When providing translations, please use Chinese to explain each sentence’s ten
 For phrases or individual words that require translation, provide the source (dictionary) for each one.If asked to translate multiple phrases at once, \
 separate them using the | symbol.Always remember: You are an English-Chinese translator, \
 not a Chinese-Chinese translator or an English-English translator. Below is the text you need to translate: \n\n",
-        "Button": None,
         "Suffix": "",
         "Color": "stop",
     },
     "中译英": {
         "Prefix": "Please translate following sentence to English: \n\n",
-        "Button": None,
         "Suffix": "",
     },
     "学术中译英": {
         "Prefix": "Please translate following sentence to English with academic writing, and provide some related authoritative examples: \n\n",
-        "Button": None,
         "Suffix": "",
     },
     "英译中": {
         "Prefix": "请翻译成中文:\n\n",
-        "Button": None,
         "Suffix": "",
     },
     "解释代码": {
         "Prefix": "请解释以下代码:\n```\n",
-        "Button": None,
         "Suffix": "\n```\n",
         "Color": "stop",
     },
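The diff does not show how these entries are consumed; as a rough, assumed sketch, a button handler would wrap the text box contents between Prefix and Suffix before handing the result to predict.

# Assumed usage sketch; apply_functional and the handler wiring are not in this commit.
def apply_functional(entry: dict, user_text: str) -> str:
    # 前言 (Prefix) goes before the user text, 后语 (Suffix) after it.
    return entry["Prefix"] + user_text + entry.get("Suffix", "")

entry = {"Prefix": "请翻译成中文:\n\n", "Suffix": ""}
print(apply_functional(entry, "The quick brown fox jumps over the lazy dog."))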
main.py
CHANGED
@@ -3,6 +3,7 @@ import os
 import markdown, mdtex2html
 from predict import predict
 from show_math import convert as convert_math
+from config import proxies, WEB_PORT
 
 def find_free_port():
     import socket
@@ -11,8 +12,8 @@ def find_free_port():
         s.bind(('', 0))
         s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
         return s.getsockname()[1]
-
-PORT = find_free_port()
+
+PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
 
 initial_prompt = "Serve me as a writing and programming assistant."
 title_html = """<h1 align="center">ChatGPT 学术优化</h1>"""
@@ -81,8 +82,8 @@ with gr.Blocks() as demo:
     for k in functional:
         variant = functional[k]["Color"] if "Color" in functional[k] else "secondary"
         functional[k]["Button"] = gr.Button(k, variant=variant)
-
-    statusDisplay = gr.Markdown("
+    from check_proxy import check_proxy
+    statusDisplay = gr.Markdown(f"{check_proxy(proxies)}")
     systemPromptTxt = gr.Textbox(show_label=True, placeholder=f"System Prompt", label="System prompt", value=initial_prompt).style(container=True)
     #inputs, top_p, temperature, top_k, repetition_penalty
     with gr.Accordion("arguments", open=False):
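This hunk only selects PORT; how it is consumed is outside the diff. A hypothetical one-liner, assuming the launch call at the bottom of main.py simply passes it to Gradio:

# Hypothetical; demo and PORT are the objects defined in main.py above.
demo.launch(server_port=PORT)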
predict.py
CHANGED
@@ -8,11 +8,11 @@ import os
 
 if os.path.exists('config_private.py'):
     # 放自己的秘密如API和代理网址
-    from config_private import proxies, API_URL, API_KEY
+    from config_private import proxies, API_URL, API_KEY, TIMEOUT_SECONDS
 else:
-    from config import proxies, API_URL, API_KEY
-
+    from config import proxies, API_URL, API_KEY, TIMEOUT_SECONDS
 
+timeout_bot_msg = 'Request timeout, network error. please check proxy settings in config.py.'
 
 def compose_system(system_prompt):
     return {"role": "system", "content": system_prompt}
@@ -35,7 +35,7 @@ def predict(inputs, top_p, temperature, chatbot=[], history=[], system_prompt=''
     raw_input = inputs
     logging.info(f'[raw_input] {raw_input}')
     chatbot.append((inputs, ""))
-    yield chatbot, history, "
+    yield chatbot, history, "等待响应"
 
     headers = {
         "Content-Type": "application/json",
@@ -49,26 +49,29 @@ def predict(inputs, top_p, temperature, chatbot=[], history=[], system_prompt=''
     messages = [compose_system(system_prompt)]
     if chat_counter:
         for index in range(0, 2*chat_counter, 2):
+            what_i_have_asked = {}
+            what_i_have_asked["role"] = "user"
+            what_i_have_asked["content"] = history[index]
+            what_gpt_answer = {}
+            what_gpt_answer["role"] = "assistant"
+            what_gpt_answer["content"] = history[index+1]
+            if what_i_have_asked["content"] != "":
+                if not (what_gpt_answer["content"] != "" or retry): continue
+                if what_gpt_answer["content"] == timeout_bot_msg: continue
+                messages.append(what_i_have_asked)
+                messages.append(what_gpt_answer)
             else:
-                messages[-1]['content'] =
+                messages[-1]['content'] = what_gpt_answer['content']
+
     if retry and chat_counter:
         messages.pop()
     else:
-        messages.append(
+        what_i_ask_now = {}
+        what_i_ask_now["role"] = "user"
+        what_i_ask_now["content"] = inputs
+        messages.append(what_i_ask_now)
     chat_counter += 1
+
     # messages
     payload = {
         "model": "gpt-3.5-turbo",
@@ -87,10 +90,10 @@ def predict(inputs, top_p, temperature, chatbot=[], history=[], system_prompt=''
     try:
         # make a POST request to the API endpoint using the requests.post method, passing in stream=True
        response = requests.post(API_URL, headers=headers, proxies=proxies,
-                                json=payload, stream=True, timeout=
+                                json=payload, stream=True, timeout=TIMEOUT_SECONDS)
     except:
-        chatbot[-1] = ((chatbot[-1][0],
-        yield chatbot, history, "
+        chatbot[-1] = ((chatbot[-1][0], timeout_bot_msg))
+        yield chatbot, history, "请求超时"
         raise TimeoutError
 
     token_counter = 0
@@ -101,8 +104,6 @@ def predict(inputs, top_p, temperature, chatbot=[], history=[], system_prompt=''
     stream_response = response.iter_lines()
     while True:
         chunk = next(stream_response)
-        # print(chunk)
-
         if chunk == b'data: [DONE]':
             break
 
@@ -119,16 +120,21 @@ def predict(inputs, top_p, temperature, chatbot=[], history=[], system_prompt=''
             break
         except Exception as e:
             traceback.print_exc()
+            print(chunk.decode())
 
-            json.loads(chunk.decode()[6:])[
+        try:
+            chunkjson = json.loads(chunk.decode()[6:])
+            status_text = f"finish_reason: {chunkjson['choices'][0]['finish_reason']}"
+            partial_words = partial_words + json.loads(chunk.decode()[6:])['choices'][0]["delta"]["content"]
+            if token_counter == 0:
+                history.append(" " + partial_words)
+            else:
+                history[-1] = partial_words
+            chatbot[-1] = (history[-2], history[-1])
+            token_counter += 1
+            yield chatbot, history, status_text
+
+        except Exception as e:
+            traceback.print_exc()
+            print(chunk.decode())
+            yield chatbot, history, "Json解析不合常规"