3v324v23 committed
Commit: ac2c8ca
Parent: b3a67b8

Correctly display requests errors

Files changed (4):
  1. .vscode/launch.json +16 -0
  2. functional.py +3 -3
  3. functional_crazy.py +108 -41
  4. predict.py +26 -10
.vscode/launch.json ADDED
@@ -0,0 +1,16 @@
+ {
+     // Use IntelliSense to learn about possible attributes.
+     // Hover to view descriptions of existing attributes.
+     // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
+     "version": "0.2.0",
+     "configurations": [
+         {
+             "name": "Python: Current File",
+             "type": "python",
+             "request": "launch",
+             "program": "${file}",
+             "console": "integratedTerminal",
+             "justMyCode": false
+         }
+     ]
+ }
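Note: with this configuration, F5 in VS Code launches whichever file is currently open ("program": "${file}") in the integrated terminal under the Python debugger, and "justMyCode": false lets the debugger step into installed library code as well as the project's own files, which is presumably why it was added alongside the changes below.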
functional.py CHANGED
@@ -12,7 +12,7 @@ def get_functionals():
      improve the spelling, grammar, clarity, concision and overall readability. When neccessary, rewrite the whole sentence. \
      Furthermore, list all modification and explain the reasons to do so in markdown table.\n\n", # 前言
      "Suffix": "", # 后语
-     "Color": "stop", # 按钮颜色
+     "Color": "secondary", # 按钮颜色
      },
      "中文学术润色": {
      "Prefix": "作为一名中文学术论文写作改进助理,你的任务是改进所提供文本的拼写、语法、清晰、简洁和整体可读性,同时分解长句,减少重复,并提供改进建议。请只提供文本的更正版本,避免包括解释。请编辑以下文本:\n\n",
@@ -35,7 +35,7 @@ For phrases or individual words that require translation, provide the source (di
      separate them using the | symbol.Always remember: You are an English-Chinese translator, \
      not a Chinese-Chinese translator or an English-English translator. Below is the text you need to translate: \n\n",
      "Suffix": "",
-     "Color": "stop",
+     "Color": "secondary",
      },
      "中译英": {
      "Prefix": "Please translate following sentence to English: \n\n",
@@ -52,7 +52,7 @@ not a Chinese-Chinese translator or an English-English translator. Below is the
      "解释代码": {
      "Prefix": "请解释以下代码:\n```\n",
      "Suffix": "\n```\n",
-     "Color": "stop",
+     "Color": "secondary",
      },
      }
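The only change in functional.py is the button color of three presets, from "stop" to "secondary". As a rough sketch of how a preset's "Color" field typically reaches the screen (assuming a gr.Blocks UI; build_preset_buttons below is a hypothetical helper, not code from this repository):

```python
# Illustrative only: maps each preset's "Color" onto gr.Button's variant, which
# in Gradio accepts "primary", "secondary" or "stop" -- the same values used in
# functional.py. build_preset_buttons is a hypothetical helper, not repo code.
import gradio as gr
from functional import get_functionals

def build_preset_buttons():
    buttons = {}
    with gr.Blocks() as demo:
        for name, preset in get_functionals().items():
            # Presets without an explicit color fall back to a subdued button.
            variant = preset.get("Color", "secondary")
            buttons[name] = gr.Button(name, variant=variant)
    return demo, buttons
```

Under such a mapping, the edited presets move from the attention-grabbing "stop" style to the more subdued "secondary" style described in the comment at the top of functional_crazy.py.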
functional_crazy.py CHANGED
@@ -3,6 +3,9 @@
  # 'secondary' for a more subdued style,
  # 'stop' for a stop button.
  # """
+
+ fast_debug = False
+
  def 自我程序解构简单案例(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
      import time
      from predict import predict_no_ui_no_history
@@ -25,90 +28,154 @@ def 解析项目本身(txt, top_p, temperature, chatbot, history, systemPromptTx
              file_content = f.read()

          前言 = "接下来请你分析自己的程序构成,别紧张," if index==0 else ""
-         i_say = f'请对下面的程序文件做一个概述: ```{file_content}```'
-         i_say_show_user = 前言 + f'请对下面的程序文件做一个概述: {os.path.abspath(fp)}'
+         i_say = 前言 + f'请对下面的程序文件做一个概述文件名是{fp},文件代码是 ```{file_content}```'
+         i_say_show_user = 前言 + f'[{index}/{len(file_manifest)}] 请对下面的程序文件做一个概述: {os.path.abspath(fp)}'
          chatbot.append((i_say_show_user, "[waiting gpt response]"))
          yield chatbot, history, '正常'

-         # ** gpt request **
-         gpt_say = predict_no_ui(inputs=i_say, top_p=top_p, temperature=temperature)
-
-         chatbot[-1] = (i_say_show_user, gpt_say)
-         history.append(i_say_show_user); history.append(gpt_say)
-         yield chatbot, history, '正常'
-         time.sleep(2)
+         if not fast_debug:
+             # ** gpt request **
+             gpt_say = predict_no_ui(inputs=i_say, top_p=top_p, temperature=temperature)
+
+             chatbot[-1] = (i_say_show_user, gpt_say)
+             history.append(i_say_show_user); history.append(gpt_say)
+             yield chatbot, history, '正常'
+             time.sleep(2)

      i_say = f'根据以上你自己的分析,对程序的整体功能和构架做出概括。然后用一张markdown表格整理每个文件的功能(包括{file_manifest})。'
      chatbot.append((i_say, "[waiting gpt response]"))
      yield chatbot, history, '正常'

-     # ** gpt request **
-     gpt_say = predict_no_ui(inputs=i_say, top_p=top_p, temperature=temperature, history=history)
-
-     chatbot[-1] = (i_say, gpt_say)
-     history.append(i_say); history.append(gpt_say)
-     yield chatbot, history, '正常'
+     if not fast_debug:
+         # ** gpt request **
+         gpt_say = predict_no_ui(inputs=i_say, top_p=top_p, temperature=temperature, history=history)
+
+         chatbot[-1] = (i_say, gpt_say)
+         history.append(i_say); history.append(gpt_say)
+         yield chatbot, history, '正常'

+ def report_execption(chatbot, history, a, b):
+     chatbot.append((a, b))
+     history.append(a); history.append(b)
+
+
- def 解析一个Python项目(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
+ def 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt):
      import time, glob, os
      from predict import predict_no_ui
-     if os.path.exists(txt):
-         project_folder = txt
-     else:
-         if txt == "": txt = '空空如也的输入栏'
-         chatbot.append((f"解析项目: {txt}", f"找不到本地项目: {txt}"))
-         history.append(f"解析项目: {txt}"); history.append(f"找不到本地项目: {txt}")
-         yield chatbot, history, '正常'
-         return
-
-     file_manifest = [f for f in glob.glob(f'{project_folder}/*.py')]
      print('begin analysis on:', file_manifest)
      for index, fp in enumerate(file_manifest):
          with open(fp, 'r', encoding='utf-8') as f:
              file_content = f.read()

-         前言 = "接下来请你逐文件分析下面的Python工程" if index==0 else ""
-         i_say = f'请对下面的程序文件做一个概述: ```{file_content}```'
+         前言 = "接下来请你逐文件分析下面的工程" if index==0 else ""
+         i_say = 前言 + f'请对下面的程序文件做一个概述文件名是{os.path.relpath(fp, project_folder)},文件代码是 ```{file_content}```'
          i_say_show_user = 前言 + f'[{index}/{len(file_manifest)}] 请对下面的程序文件做一个概述: {os.path.abspath(fp)}'
          chatbot.append((i_say_show_user, "[waiting gpt response]"))
          print('[1] yield chatbot, history')
          yield chatbot, history, '正常'

-         # ** gpt request **
-         gpt_say = predict_no_ui(inputs=i_say, top_p=top_p, temperature=temperature)
-
-         print('[2] end gpt req')
-         chatbot[-1] = (i_say_show_user, gpt_say)
-         history.append(i_say_show_user); history.append(gpt_say)
-         print('[3] yield chatbot, history')
-         yield chatbot, history, '正常'
-         print('[4] next')
-         time.sleep(2)
+         if not fast_debug:
+             msg = '正常'
+             # ** gpt request **
+             while True:
+                 try:
+                     gpt_say = predict_no_ui(inputs=i_say, top_p=top_p, temperature=temperature)
+                     break
+                 except ConnectionAbortedError as e:
+                     i_say = i_say[:len(i_say)//2]
+                     msg = '文件太长,进行了拦腰截断'
+
+             print('[2] end gpt req')
+             chatbot[-1] = (i_say_show_user, gpt_say)
+             history.append(i_say_show_user); history.append(gpt_say)
+             print('[3] yield chatbot, history')
+             yield chatbot, history, msg
+             print('[4] next')
+         if not fast_debug: time.sleep(2)

-     i_say = f'根据以上你自己的分析,对程序的整体功能和构架做出概括。然后用一张markdown表格整理每个文件的功能(包括{file_manifest})。'
-     chatbot.append((i_say, "[waiting gpt response]"))
-     yield chatbot, history, '正常'
+     all_file = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(file_manifest)])
+     i_say = f'根据以上你自己的分析,对程序的整体功能和构架做出概括。然后用一张markdown表格整理每个文件的功能(包括{all_file})。'
+     chatbot.append((i_say, "[waiting gpt response]"))
+     yield chatbot, history, '正常'

-     # ** gpt request **
-     gpt_say = predict_no_ui(inputs=i_say, top_p=top_p, temperature=temperature, history=history)
-
-     chatbot[-1] = (i_say, gpt_say)
-     history.append(i_say); history.append(gpt_say)
-     yield chatbot, history, '正常'
+     if not fast_debug:
+         msg = '正常'
+         # ** gpt request **
+         while True:
+             try:
+                 gpt_say = predict_no_ui(inputs=i_say, top_p=top_p, temperature=temperature, history=history)
+                 break
+             except ConnectionAbortedError as e:
+                 history = [his[len(his)//2:] for his in history]
+                 msg = '对话历史太长,每段历史拦腰截断'
+
+         chatbot[-1] = (i_say, gpt_say)
+         history.append(i_say); history.append(gpt_say)
+         yield chatbot, history, msg
+
+
+ def 解析一个Python项目(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
+     import glob, os
+     if os.path.exists(txt):
+         project_folder = txt
+     else:
+         if txt == "": txt = '空空如也的输入栏'
+         report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目: {txt}")
+         yield chatbot, history, '正常'
+         return
+     file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.py', recursive=True)]
+     if len(file_manifest) == 0:
+         report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}")
+         yield chatbot, history, '正常'
+         return
+     yield from 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt)


+ def 解析一个C项目的头文件(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
+     import glob, os
+     if os.path.exists(txt):
+         project_folder = txt
+     else:
+         if txt == "": txt = '空空如也的输入栏'
+         report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目: {txt}")
+         yield chatbot, history, '正常'
+         return
+     file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.h', recursive=True)] # + \
+     #                [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \
+     #                [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
+     if len(file_manifest) == 0:
+         report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.h/.cpp/.c文件: {txt}")
+         yield chatbot, history, '正常'
+         return
+     yield from 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt)
+

  def get_crazy_functionals():
      return {
          "程序解构简单案例": {
+             "Color": "stop", # 按钮颜色
              "Function": 自我程序解构简单案例
          },
          "请解析并解构此项目本身": {
+             "Color": "stop", # 按钮颜色
              "Function": 解析项目本身
          },
          "解析一整个Python项目(输入栏给定项目完整目录)": {
+             "Color": "stop", # 按钮颜色
              "Function": 解析一个Python项目
          },
+         "解析一整个C++项目的头文件(输入栏给定项目完整目录)": {
+             "Color": "stop", # 按钮颜色
+             "Function": 解析一个C项目的头文件
+         },
+
      }
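The new 解析源代码 generator wraps each predict_no_ui call in a retry loop: when predict.py raises ConnectionAbortedError because the request is too long, the prompt (or each history entry) is cut in half and the call is retried, while the fast_debug flag skips the GPT requests entirely so the yield flow can be exercised quickly. A minimal sketch of that truncate-and-retry pattern in isolation (ask_with_truncation is an illustrative name, not repo code):

```python
# Sketch of the truncate-and-retry pattern used in 解析源代码 above. Assumes
# predict_no_ui raises ConnectionAbortedError when the request body is too long,
# as the updated predict.py does. ask_with_truncation is illustrative, not repo code.
from predict import predict_no_ui

def ask_with_truncation(i_say, top_p, temperature, history=[]):
    msg = '正常'
    while True:
        try:
            # One blocking request; no streaming UI involved.
            gpt_say = predict_no_ui(inputs=i_say, top_p=top_p,
                                    temperature=temperature, history=history)
            break
        except ConnectionAbortedError:
            # Same recovery as 解析源代码: keep the first half of the prompt
            # and try again with a shorter request.
            i_say = i_say[:len(i_say)//2]
            msg = '文件太长,进行了拦腰截断'
    return gpt_say, msg
```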
predict.py CHANGED
@@ -14,6 +14,13 @@ except: from config import proxies, API_URL, API_KEY, TIMEOUT_SECONDS

  timeout_bot_msg = 'Request timeout, network error. please check proxy settings in config.py.'

+ def get_full_error(chunk, stream_response):
+     while True:
+         try:
+             chunk += next(stream_response)
+         except:
+             break
+     return chunk

  def predict_no_ui(inputs, top_p, temperature, history=[]):
      messages = [{"role": "system", "content": ""}]
@@ -60,10 +67,17 @@ def predict_no_ui(inputs, top_p, temperature, history=[]):
          # make a POST request to the API endpoint using the requests.post method, passing in stream=True
          response = requests.post(API_URL, headers=headers, proxies=proxies,
                                   json=payload, stream=True, timeout=TIMEOUT_SECONDS*2)
-     except:
+     except Exception as e:
+         traceback.print_exc()
          raise TimeoutError
-
-     return json.loads(response.text)["choices"][0]["message"]["content"]
+
+     try:
+         result = json.loads(response.text)["choices"][0]["message"]["content"]
+         return result
+     except Exception as e:
+         if "choices" not in response.text: print(response.text)
+         raise ConnectionAbortedError("Json解析不合常规,可能是文本过长" + response.text)
+

@@ -163,11 +177,6 @@ def predict(inputs, top_p, temperature, chatbot=[], history=[], system_prompt=''
                  if len(json.loads(chunk.decode()[6:])['choices'][0]["delta"]) == 0:
                      logging.info(f'[response] {chatbot[-1][-1]}')
                      break
-             except Exception as e:
-                 traceback.print_exc()
-                 print(chunk.decode())
-
-             try:
                  chunkjson = json.loads(chunk.decode()[6:])
                  status_text = f"finish_reason: {chunkjson['choices'][0]['finish_reason']}"
                  partial_words = partial_words + json.loads(chunk.decode()[6:])['choices'][0]["delta"]["content"]
@@ -181,5 +190,12 @@ def predict(inputs, top_p, temperature, chatbot=[], history=[], system_prompt=''

          except Exception as e:
              traceback.print_exc()
-             print(chunk.decode())
-             yield chatbot, history, "Json解析不合常规"
+             yield chatbot, history, "Json解析不合常规,很可能是文本过长"
+             chunk = get_full_error(chunk, stream_response)
+             error_msg = chunk.decode()
+             if "reduce the length" in error_msg:
+                 chatbot[-1] = (history[-1], "老铁,输入的文本太长了")
+             yield chatbot, history, "Json解析不合常规,很可能是文本过长" + error_msg
+             return
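Taken together, predict_no_ui now raises ConnectionAbortedError with the raw response attached whenever the reply cannot be parsed, and the streaming predict path drains the remainder of the stream through get_full_error so the API's real error text (for example a "reduce the length" complaint) is surfaced instead of a bare JSON parse failure. A small self-contained check of what get_full_error does, using a plain iterator as a stand-in for the requests stream:

```python
# Stand-alone check of get_full_error from predict.py: it keeps appending chunks
# from the stream until the iterator is exhausted, so a partial error chunk grows
# into the API's full error body. The fake stream below is illustrative only.
from predict import get_full_error

fake_stream = iter([
    b'{"error": {"message": "please reduce the length',
    b' of the messages"}}',
])
first_chunk = b''  # stands in for the chunk that triggered the JSON error
full_error = get_full_error(first_chunk, fake_stream)
error_msg = full_error.decode()
assert "reduce the length" in error_msg  # the substring predict() checks for
print(error_msg)
```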