yym68686 committed on
Commit
27b85ef
·
1 Parent(s): 29e15d9

Fix the issue of handling errors in gpt format response

Browse files
Files changed (1) hide show
  1. response.py +20 -25
response.py CHANGED
@@ -91,39 +91,34 @@ async def fetch_gpt_response_stream(client, url, headers, payload, max_redirects
91
  yield {"error": f"fetch_gpt_response_stream HTTP Error {response.status_code}", "details": error_json}
92
  return
93
 
94
- # 检查是否存在重定向脚本
95
- content = await response.aread()
96
- content_str = content.decode('utf-8', errors='replace')
97
- # logger.info(f"chunk: {repr(content_str)}")
98
- import re
99
- redirect_match = re.search(r"window\.location\.href\s*=\s*'([^']+)'", content_str)
100
- if redirect_match:
101
- new_url = redirect_match.group(1)
102
- # logger.info(f"new_url: {new_url}")
103
- if not new_url.startswith('http'):
104
- # 如果是相对路径,构造完整URL
105
- # logger.info(url.split('/'))
106
- base_url = '/'.join(url.split('/')[:3]) # 提取协议和域名
107
- new_url = base_url + new_url
108
- url = new_url
109
- # logger.info(f"new_url: {new_url}")
110
- redirect_count += 1
111
- continue
112
-
113
- buffer = content_str
114
- while "\n" in buffer:
115
- line, buffer = buffer.split("\n", 1)
116
- if line and line != "data: " and line != "data:" and not line.startswith(": "):
117
- yield line + "\n"
118
-
119
  async for chunk in response.aiter_text():
120
  # logger.info(f"chunk: {repr(chunk)}")
121
  buffer += chunk
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
122
  while "\n" in buffer:
123
  line, buffer = buffer.split("\n", 1)
124
  # logger.info("line: %s", repr(line))
125
  if line and line != "data: " and line != "data:" and not line.startswith(": "):
126
  yield line + "\n"
 
 
 
127
  return
128
 
129
  yield {"error": "Too many redirects", "details": f"Reached maximum of {max_redirects} redirects"}
 
91
  yield {"error": f"fetch_gpt_response_stream HTTP Error {response.status_code}", "details": error_json}
92
  return
93
 
94
+ buffer = ""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
95
  async for chunk in response.aiter_text():
96
  # logger.info(f"chunk: {repr(chunk)}")
97
  buffer += chunk
98
+ if chunk.startswith("<script"):
99
+ import re
100
+ redirect_match = re.search(r"window\.location\.href\s*=\s*'([^']+)'", chunk)
101
+ if redirect_match:
102
+ new_url = redirect_match.group(1)
103
+ # logger.info(f"new_url: {new_url}")
104
+ if not new_url.startswith('http'):
105
+ # 如果是相对路径,构造完整URL
106
+ # logger.info(url.split('/'))
107
+ base_url = '/'.join(url.split('/')[:3])
108
+ new_url = base_url + new_url
109
+ url = new_url
110
+ # logger.info(f"new_url: {new_url}")
111
+ redirect_count += 1
112
+ break
113
+ redirect_count = 0
114
  while "\n" in buffer:
115
  line, buffer = buffer.split("\n", 1)
116
  # logger.info("line: %s", repr(line))
117
  if line and line != "data: " and line != "data:" and not line.startswith(": "):
118
  yield line + "\n"
119
+ if redirect_count != 0:
120
+ continue
121
+ else:
122
  return
123
 
124
  yield {"error": "Too many redirects", "details": f"Reached maximum of {max_redirects} redirects"}