qingxu99 committed
Commit 42eef1b
1 Parent(s): 728eba0

add free newbing without cookie using edge-gpt

README.md CHANGED
@@ -301,16 +301,19 @@ gpt_academic developer QQ group 2: 610599535
 ```
 The code draws on designs from many other excellent projects, including:
 
-# Project 1: Tsinghua ChatGLM-6B
+# Project 1: Tsinghua ChatGLM-6B:
 https://github.com/THUDM/ChatGLM-6B
 
-# Project 2: Tsinghua JittorLLMs
+# Project 2: Tsinghua JittorLLMs:
 https://github.com/Jittor/JittorLLMs
 
-# Project 3: borrowed many techniques from ChuanhuChatGPT
+# Project 3: Edge-GPT:
+https://github.com/acheong08/EdgeGPT
+
+# Project 4: ChuanhuChatGPT:
 https://github.com/GaiZhenbiao/ChuanhuChatGPT
 
-# Project 4: ChatPaper
+# Project 5: ChatPaper:
 https://github.com/kaixindelele/ChatPaper
 
 # More:
config.py CHANGED
@@ -47,7 +47,7 @@ MAX_RETRY = 2
 # Model selection
 LLM_MODEL = "gpt-3.5-turbo" # options ↓↓↓
 AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "stack-claude"]
-# P.S. Other available models also include ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
+# P.S. Other available models also include ["newbing-free", "jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
 
 # Execution mode (CPU/GPU) for local LLMs such as ChatGLM
 LOCAL_MODEL_DEVICE = "cpu" # options: "cuda"
@@ -73,6 +73,7 @@ CUSTOM_PATH = "/"
 
 # To use newbing, paste newbing's long cookie string here
 NEWBING_STYLE = "creative" # ["creative", "balanced", "precise"]
+# From now on, if you call the "newbing-free" model, you no longer need to fill in NEWBING_COOKIES
 NEWBING_COOKIES = """
 your bing cookies here
 """
request_llm/bridge_all.py CHANGED
@@ -201,7 +201,20 @@ if "stack-claude" in AVAIL_LLM_MODELS:
             "token_cnt": get_token_num_gpt35,
         }
     })
-
+if "newbing-free" in AVAIL_LLM_MODELS:
+    from .bridge_newbingfree import predict_no_ui_long_connection as newbingfree_noui
+    from .bridge_newbingfree import predict as newbingfree_ui
+    # newbing-free
+    model_info.update({
+        "newbing-free": {
+            "fn_with_ui": newbingfree_ui,
+            "fn_without_ui": newbingfree_noui,
+            "endpoint": newbing_endpoint,
+            "max_token": 4096,
+            "tokenizer": tokenizer_gpt35,
+            "token_cnt": get_token_num_gpt35,
+        }
+    })
 
 def LLM_CATCH_EXCEPTION(f):
     """
request_llm/bridge_newbingfree.py ADDED
@@ -0,0 +1,243 @@
+"""
+========================================================================
+Part 1: from EdgeGPT.py
+https://github.com/acheong08/EdgeGPT
+========================================================================
+"""
+from .edge_gpt_free import Chatbot as NewbingChatbot
+load_message = "Waiting for the NewBing response."
+
+"""
+========================================================================
+Part 2: subprocess worker (the calling body)
+========================================================================
+"""
+import time
+import json
+import re
+import logging
+import asyncio
+import importlib
+import threading
+from toolbox import update_ui, get_conf, trimmed_format_exc
+from multiprocessing import Process, Pipe
+
+def preprocess_newbing_out(s):
+    pattern = r'\^(\d+)\^'  # match ^number^
+    sub = lambda m: '('+m.group(1)+')'  # replace the matched number with (number)
+    result = re.sub(pattern, sub, s)  # perform the substitution
+    if '[1]' in result:
+        result += '\n\n```reference\n' + "\n".join([r for r in result.split('\n') if r.startswith('[')]) + '\n```\n'
+    return result
+
+def preprocess_newbing_out_simple(result):
+    if '[1]' in result:
+        result += '\n\n```reference\n' + "\n".join([r for r in result.split('\n') if r.startswith('[')]) + '\n```\n'
+    return result
+
+class NewBingHandle(Process):
+    def __init__(self):
+        super().__init__(daemon=True)
+        self.parent, self.child = Pipe()
+        self.newbing_model = None
+        self.info = ""
+        self.success = True
+        self.local_history = []
+        self.check_dependency()
+        self.start()
+        self.threadLock = threading.Lock()
+
+    def check_dependency(self):
+        try:
+            self.success = False
+            import certifi, httpx, rich
+            self.info = "Dependency check passed; waiting for the NewBing response. Note that multiple users currently cannot call the NewBing interface at the same time (there is a thread lock), otherwise each user's NewBing query history would bleed into the others'. When calling NewBing, the configured proxy is used automatically."
+            self.success = True
+        except:
+            self.info = "Missing dependencies. To use NewBing, in addition to the basic pip dependencies, you also need to run `pip install -r request_llm/requirements_newbing.txt` to install NewBing's dependencies."
+            self.success = False
+
+    def ready(self):
+        return self.newbing_model is not None
+
+    async def async_run(self):
+        # read the configuration
+        NEWBING_STYLE, = get_conf('NEWBING_STYLE')
+        from request_llm.bridge_all import model_info
+        endpoint = model_info['newbing']['endpoint']
+        while True:
+            # wait
+            kwargs = self.child.recv()
+            question = kwargs['query']
+            history = kwargs['history']
+            system_prompt = kwargs['system_prompt']
+
+            # reset if needed
+            if len(self.local_history) > 0 and len(history) == 0:
+                await self.newbing_model.reset()
+                self.local_history = []
+
+            # start composing the question
+            prompt = ""
+            if system_prompt not in self.local_history:
+                self.local_history.append(system_prompt)
+                prompt += system_prompt + '\n'
+
+            # append the history
+            for ab in history:
+                a, b = ab
+                if a not in self.local_history:
+                    self.local_history.append(a)
+                    prompt += a + '\n'
+                # if b not in self.local_history:
+                #     self.local_history.append(b)
+                #     prompt += b + '\n'
+
+            # the question itself
+            prompt += question
+            self.local_history.append(question)
+            print('question:', prompt)
+            # submit
+            async for final, response in self.newbing_model.ask_stream(
+                prompt=question,
+                conversation_style=NEWBING_STYLE,  # ["creative", "balanced", "precise"]
+                wss_link=endpoint,                 # "wss://sydney.bing.com/sydney/ChatHub"
+            ):
+                if not final:
+                    print(response)
+                    self.child.send(str(response))
+                else:
+                    print('-------- receive final ---------')
+                    self.child.send('[Finish]')
+                    # self.local_history.append(response)
+
+
+    def run(self):
+        """
+        This function runs in the subprocess.
+        """
+        # first run: load the parameters
+        self.success = False
+        self.local_history = []
+        if (self.newbing_model is None) or (not self.success):
+            # proxy settings
+            proxies, = get_conf('proxies')
+            if proxies is None:
+                self.proxies_https = None
+            else:
+                self.proxies_https = proxies['https']
+
+            try:
+                self.newbing_model = NewbingChatbot(proxy=self.proxies_https)
+            except:
+                self.success = False
+                tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
+                self.child.send(f'[Local Message] Cannot load the NewBing component. {tb_str}')
+                self.child.send('[Fail]')
+                self.child.send('[Finish]')
+                raise RuntimeError(f"Cannot load the NewBing component.")
+
+        self.success = True
+        try:
+            # enter the task-waiting loop
+            asyncio.run(self.async_run())
+        except Exception:
+            tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
+            self.child.send(f'[Local Message] NewBing failed {tb_str}.')
+            self.child.send('[Fail]')
+            self.child.send('[Finish]')
+
+    def stream_chat(self, **kwargs):
+        """
+        This function runs in the main process.
+        """
+        self.threadLock.acquire()
+        self.parent.send(kwargs)      # send the request to the subprocess
+        while True:
+            res = self.parent.recv()  # wait for a NewBing reply fragment
+            if res == '[Finish]':
+                break                 # done
+            elif res == '[Fail]':
+                self.success = False
+                break
+            else:
+                yield res             # a NewBing reply fragment
+        self.threadLock.release()
+
+
+"""
+========================================================================
+Part 3: unified calling interface for the main process
+========================================================================
+"""
+global newbingfree_handle
+newbingfree_handle = None
+
+def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
+    """
+    Multi-threaded method.
+    See request_llm/bridge_all.py for documentation.
+    """
+    global newbingfree_handle
+    if (newbingfree_handle is None) or (not newbingfree_handle.success):
+        newbingfree_handle = NewBingHandle()
+        if len(observe_window) >= 1: observe_window[0] = load_message + "\n\n" + newbingfree_handle.info
+        if not newbingfree_handle.success:
+            error = newbingfree_handle.info
+            newbingfree_handle = None
+            raise RuntimeError(error)
+
+    # there is no sys_prompt interface, so fold the prompt into the history
+    history_feedin = []
+    for i in range(len(history)//2):
+        history_feedin.append([history[2*i], history[2*i+1]])
+
+    watch_dog_patience = 5  # watchdog patience; 5 seconds is enough
+    response = ""
+    if len(observe_window) >= 1: observe_window[0] = "[Local Message]: Waiting for the NewBing response ..."
+    for response in newbingfree_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=sys_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
+        if len(observe_window) >= 1: observe_window[0] = preprocess_newbing_out_simple(response)
+        if len(observe_window) >= 2:
+            if (time.time()-observe_window[1]) > watch_dog_patience:
+                raise RuntimeError("Program terminated.")
+    return preprocess_newbing_out_simple(response)
+
+def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream=True, additional_fn=None):
+    """
+    Single-threaded method.
+    See request_llm/bridge_all.py for documentation.
+    """
+    chatbot.append((inputs, "[Local Message]: Waiting for the NewBing response ..."))
+
+    global newbingfree_handle
+    if (newbingfree_handle is None) or (not newbingfree_handle.success):
+        newbingfree_handle = NewBingHandle()
+        chatbot[-1] = (inputs, load_message + "\n\n" + newbingfree_handle.info)
+        yield from update_ui(chatbot=chatbot, history=[])
+        if not newbingfree_handle.success:
+            newbingfree_handle = None
+            return
+
+    if additional_fn is not None:
+        import core_functional
+        importlib.reload(core_functional)  # hot-reload the prompt
+        core_functional = core_functional.get_core_functions()
+        if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs)  # fetch the preprocessing function, if any
+        inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
+
+    history_feedin = []
+    for i in range(len(history)//2):
+        history_feedin.append([history[2*i], history[2*i+1]])
+
+    chatbot[-1] = (inputs, "[Local Message]: Waiting for the NewBing response ...")
+    response = "[Local Message]: Waiting for the NewBing response ..."
+    yield from update_ui(chatbot=chatbot, history=history, msg="NewBing responds slowly and has not finished yet. Please wait for it to complete before submitting a new question.")
+    for response in newbingfree_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=system_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
+        chatbot[-1] = (inputs, preprocess_newbing_out(response))
+        yield from update_ui(chatbot=chatbot, history=history, msg="NewBing responds slowly and has not finished yet. Please wait for it to complete before submitting a new question.")
+    if response == "[Local Message]: Waiting for the NewBing response ...": response = "[Local Message]: NewBing response error, please refresh the page and retry ..."
+    history.extend([inputs, response])
+    logging.info(f'[raw_input] {inputs}')
+    logging.info(f'[response] {response}')
+    yield from update_ui(chatbot=chatbot, history=history, msg="Response complete; please submit a new question.")
+
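The new bridge reuses the project's usual worker pattern: a daemon `Process` owns the EdgeGPT chatbot and streams fragments back over a `Pipe`, with '[Finish]'/'[Fail]' as sentinels. A minimal, self-contained sketch of that IPC pattern (the `EchoWorker` below is hypothetical; it stands in for the chatbot):

```python
# Minimal sketch of the Pipe-based streaming used by NewBingHandle above.
from multiprocessing import Process, Pipe

class EchoWorker(Process):
    def __init__(self):
        super().__init__(daemon=True)
        self.parent, self.child = Pipe()

    def run(self):                    # runs in the subprocess
        while True:
            query = self.child.recv()
            for fragment in query.split():   # pretend fragments stream in
                self.child.send(fragment)
            self.child.send('[Finish]')      # sentinel: reply complete

    def stream_chat(self, query):     # runs in the main process
        self.parent.send(query)
        while True:
            res = self.parent.recv()
            if res == '[Finish]':
                break
            yield res

if __name__ == '__main__':
    worker = EchoWorker()
    worker.start()
    for piece in worker.stream_chat("hello from the main process"):
        print(piece)
```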
request_llm/edge_gpt_free.py ADDED
@@ -0,0 +1,1114 @@
+"""
+========================================================================
+Part 1: from EdgeGPT.py
+https://github.com/acheong08/EdgeGPT
+========================================================================
+"""
+"""
+Main.py
+"""
+
+import argparse
+import asyncio
+import json
+import os
+import random
+import re
+import ssl
+import sys
+import time
+import uuid
+from enum import Enum
+from pathlib import Path
+from typing import Generator
+from typing import Literal
+from typing import Optional
+from typing import Union
+
+import aiohttp
+import certifi
+import httpx
+from BingImageCreator import ImageGen
+from BingImageCreator import ImageGenAsync
+from prompt_toolkit import PromptSession
+from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
+from prompt_toolkit.completion import WordCompleter
+from prompt_toolkit.history import InMemoryHistory
+from prompt_toolkit.key_binding import KeyBindings
+from rich.live import Live
+from rich.markdown import Markdown
+
+DELIMITER = "\x1e"
+
+
+# Generate random IP between range 13.104.0.0/14
+FORWARDED_IP = (
+    f"13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}"
+)
+
+HEADERS = {
+    "accept": "application/json",
+    "accept-language": "en-US,en;q=0.9",
+    "content-type": "application/json",
+    "sec-ch-ua": '"Not_A Brand";v="99", "Microsoft Edge";v="110", "Chromium";v="110"',
+    "sec-ch-ua-arch": '"x86"',
+    "sec-ch-ua-bitness": '"64"',
+    "sec-ch-ua-full-version": '"109.0.1518.78"',
+    "sec-ch-ua-full-version-list": '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
+    "sec-ch-ua-mobile": "?0",
+    "sec-ch-ua-model": "",
+    "sec-ch-ua-platform": '"Windows"',
+    "sec-ch-ua-platform-version": '"15.0.0"',
+    "sec-fetch-dest": "empty",
+    "sec-fetch-mode": "cors",
+    "sec-fetch-site": "same-origin",
+    "x-ms-client-request-id": str(uuid.uuid4()),
+    "x-ms-useragent": "azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32",
+    "Referer": "https://www.bing.com/search?q=Bing+AI&showconv=1&FORM=hpcodx",
+    "Referrer-Policy": "origin-when-cross-origin",
+    "x-forwarded-for": FORWARDED_IP,
+}
+
+HEADERS_INIT_CONVER = {
+    "authority": "edgeservices.bing.com",
+    "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
+    "accept-language": "en-US,en;q=0.9",
+    "cache-control": "max-age=0",
+    "sec-ch-ua": '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
+    "sec-ch-ua-arch": '"x86"',
+    "sec-ch-ua-bitness": '"64"',
+    "sec-ch-ua-full-version": '"110.0.1587.69"',
+    "sec-ch-ua-full-version-list": '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
+    "sec-ch-ua-mobile": "?0",
+    "sec-ch-ua-model": '""',
+    "sec-ch-ua-platform": '"Windows"',
+    "sec-ch-ua-platform-version": '"15.0.0"',
+    "sec-fetch-dest": "document",
+    "sec-fetch-mode": "navigate",
+    "sec-fetch-site": "none",
+    "sec-fetch-user": "?1",
+    "upgrade-insecure-requests": "1",
+    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69",
+    "x-edge-shopping-flag": "1",
+    "x-forwarded-for": FORWARDED_IP,
+}
+
+ssl_context = ssl.create_default_context()
+ssl_context.load_verify_locations(certifi.where())
+
+
+class NotAllowedToAccess(Exception):
+    pass
+
+
+class ConversationStyle(Enum):
+    creative = [
+        "nlu_direct_response_filter",
+        "deepleo",
+        "disable_emoji_spoken_text",
+        "responsible_ai_policy_235",
+        "enablemm",
+        "h3imaginative",
+        "travelansgnd",
+        "dv3sugg",
+        "clgalileo",
+        "gencontentv3",
+        "dv3sugg",
+        "responseos",
+        "e2ecachewrite",
+        "cachewriteext",
+        "nodlcpcwrite",
+        "travelansgnd",
+        "nojbfedge",
+    ]
+    balanced = [
+        "nlu_direct_response_filter",
+        "deepleo",
+        "disable_emoji_spoken_text",
+        "responsible_ai_policy_235",
+        "enablemm",
+        "galileo",
+        "dv3sugg",
+        "responseos",
+        "e2ecachewrite",
+        "cachewriteext",
+        "nodlcpcwrite",
+        "travelansgnd",
+        "nojbfedge",
+    ]
+    precise = [
+        "nlu_direct_response_filter",
+        "deepleo",
+        "disable_emoji_spoken_text",
+        "responsible_ai_policy_235",
+        "enablemm",
+        "galileo",
+        "dv3sugg",
+        "responseos",
+        "e2ecachewrite",
+        "cachewriteext",
+        "nodlcpcwrite",
+        "travelansgnd",
+        "h3precise",
+        "clgalileo",
+        "nojbfedge",
+    ]
+
+
+CONVERSATION_STYLE_TYPE = Optional[
+    Union[ConversationStyle, Literal["creative", "balanced", "precise"]]
+]
+
+
+def _append_identifier(msg: dict) -> str:
+    """
+    Appends special character to end of message to identify end of message
+    """
+    # Convert dict to json string
+    return json.dumps(msg, ensure_ascii=False) + DELIMITER
+
+
+def _get_ran_hex(length: int = 32) -> str:
+    """
+    Returns random hex string
+    """
+    return "".join(random.choice("0123456789abcdef") for _ in range(length))
+
+
+class _ChatHubRequest:
+    """
+    Request object for ChatHub
+    """
+
+    def __init__(
+        self,
+        conversation_signature: str,
+        client_id: str,
+        conversation_id: str,
+        invocation_id: int = 0,
+    ) -> None:
+        self.struct: dict = {}
+
+        self.client_id: str = client_id
+        self.conversation_id: str = conversation_id
+        self.conversation_signature: str = conversation_signature
+        self.invocation_id: int = invocation_id
+
+    def update(
+        self,
+        prompt: str,
+        conversation_style: CONVERSATION_STYLE_TYPE,
+        options: list | None = None,
+        webpage_context: str | None = None,
+        search_result: bool = False,
+    ) -> None:
+        """
+        Updates request object
+        """
+        if options is None:
+            options = [
+                "deepleo",
+                "enable_debug_commands",
+                "disable_emoji_spoken_text",
+                "enablemm",
+            ]
+        if conversation_style:
+            if not isinstance(conversation_style, ConversationStyle):
+                conversation_style = getattr(ConversationStyle, conversation_style)
+            options = conversation_style.value
+        self.struct = {
+            "arguments": [
+                {
+                    "source": "cib",
+                    "optionsSets": options,
+                    "allowedMessageTypes": [
+                        "Chat",
+                        "Disengaged",
+                        "AdsQuery",
+                        "SemanticSerp",
+                        "GenerateContentQuery",
+                        "SearchQuery",
+                    ],
+                    "sliceIds": [
+                        "chk1cf",
+                        "nopreloadsscf",
+                        "winlongmsg2tf",
+                        "perfimpcomb",
+                        "sugdivdis",
+                        "sydnoinputt",
+                        "wpcssopt",
+                        "wintone2tf",
+                        "0404sydicnbs0",
+                        "405suggbs0",
+                        "scctl",
+                        "330uaugs0",
+                        "0329resp",
+                        "udscahrfon",
+                        "udstrblm5",
+                        "404e2ewrt",
+                        "408nodedups0",
+                        "403tvlansgnd",
+                    ],
+                    "traceId": _get_ran_hex(32),
+                    "isStartOfSession": self.invocation_id == 0,
+                    "message": {
+                        "author": "user",
+                        "inputMethod": "Keyboard",
+                        "text": prompt,
+                        "messageType": "Chat",
+                    },
+                    "conversationSignature": self.conversation_signature,
+                    "participant": {
+                        "id": self.client_id,
+                    },
+                    "conversationId": self.conversation_id,
+                },
+            ],
+            "invocationId": str(self.invocation_id),
+            "target": "chat",
+            "type": 4,
+        }
+        if search_result:
+            have_search_result = [
+                "InternalSearchQuery",
+                "InternalSearchResult",
+                "InternalLoaderMessage",
+                "RenderCardRequest",
+            ]
+            self.struct["arguments"][0]["allowedMessageTypes"] += have_search_result
+        if webpage_context:
+            self.struct["arguments"][0]["previousMessages"] = [
+                {
+                    "author": "user",
+                    "description": webpage_context,
+                    "contextType": "WebPage",
+                    "messageType": "Context",
+                    "messageId": "discover-web--page-ping-mriduna-----",
+                },
+            ]
+        self.invocation_id += 1
+
+
+class _Conversation:
+    """
+    Conversation API
+    """
+
+    def __init__(
+        self,
+        proxy: str | None = None,
+        async_mode: bool = False,
+        cookies: list[dict] | None = None,
+    ) -> None:
+        if async_mode:
+            return
+        self.struct: dict = {
+            "conversationId": None,
+            "clientId": None,
+            "conversationSignature": None,
+            "result": {"value": "Success", "message": None},
+        }
+        self.proxy = proxy
+        proxy = (
+            proxy
+            or os.environ.get("all_proxy")
+            or os.environ.get("ALL_PROXY")
+            or os.environ.get("https_proxy")
+            or os.environ.get("HTTPS_PROXY")
+            or None
+        )
+        if proxy is not None and proxy.startswith("socks5h://"):
+            proxy = "socks5://" + proxy[len("socks5h://") :]
+        self.session = httpx.Client(
+            proxies=proxy,
+            timeout=30,
+            headers=HEADERS_INIT_CONVER,
+        )
+        if cookies:
+            for cookie in cookies:
+                self.session.cookies.set(cookie["name"], cookie["value"])
+        # Send GET request
+        response = self.session.get(
+            url=os.environ.get("BING_PROXY_URL")
+            or "https://edgeservices.bing.com/edgesvc/turing/conversation/create",
+        )
+        if response.status_code != 200:
+            response = self.session.get(
+                "https://edge.churchless.tech/edgesvc/turing/conversation/create",
+            )
+        if response.status_code != 200:
+            print(f"Status code: {response.status_code}")
+            print(response.text)
+            print(response.url)
+            raise Exception("Authentication failed")
+        try:
+            self.struct = response.json()
+        except (json.decoder.JSONDecodeError, NotAllowedToAccess) as exc:
+            raise Exception(
+                "Authentication failed. You have not been accepted into the beta.",
+            ) from exc
+        if self.struct["result"]["value"] == "UnauthorizedRequest":
+            raise NotAllowedToAccess(self.struct["result"]["message"])
+
+    @staticmethod
+    async def create(
+        proxy: str | None = None,
+        cookies: list[dict] | None = None,
+    ):
+        self = _Conversation(async_mode=True)
+        self.struct = {
+            "conversationId": None,
+            "clientId": None,
+            "conversationSignature": None,
+            "result": {"value": "Success", "message": None},
+        }
+        self.proxy = proxy
+        proxy = (
+            proxy
+            or os.environ.get("all_proxy")
+            or os.environ.get("ALL_PROXY")
+            or os.environ.get("https_proxy")
+            or os.environ.get("HTTPS_PROXY")
+            or None
+        )
+        if proxy is not None and proxy.startswith("socks5h://"):
+            proxy = "socks5://" + proxy[len("socks5h://") :]
+        transport = httpx.AsyncHTTPTransport(retries=10)
+        # Convert cookie format to httpx format
+        formatted_cookies = None
+        if cookies:
+            formatted_cookies = httpx.Cookies()
+            for cookie in cookies:
+                formatted_cookies.set(cookie["name"], cookie["value"])
+        async with httpx.AsyncClient(
+            proxies=proxy,
+            timeout=30,
+            headers=HEADERS_INIT_CONVER,
+            transport=transport,
+            cookies=formatted_cookies,
+        ) as client:
+            # Send GET request
+            response = await client.get(
+                url=os.environ.get("BING_PROXY_URL")
+                or "https://edgeservices.bing.com/edgesvc/turing/conversation/create",
+            )
+            if response.status_code != 200:
+                response = await client.get(
+                    "https://edge.churchless.tech/edgesvc/turing/conversation/create",
+                )
+        if response.status_code != 200:
+            print(f"Status code: {response.status_code}")
+            print(response.text)
+            print(response.url)
+            raise Exception("Authentication failed")
+        try:
+            self.struct = response.json()
+        except (json.decoder.JSONDecodeError, NotAllowedToAccess) as exc:
+            raise Exception(
+                "Authentication failed. You have not been accepted into the beta.",
+            ) from exc
+        if self.struct["result"]["value"] == "UnauthorizedRequest":
+            raise NotAllowedToAccess(self.struct["result"]["message"])
+        return self
+
+
+class _ChatHub:
+    """
+    Chat API
+    """
+
+    def __init__(
+        self,
+        conversation: _Conversation,
+        proxy: str = None,
+        cookies: list[dict] | None = None,
+    ) -> None:
+        self.session: aiohttp.ClientSession | None = None
+        self.wss: aiohttp.ClientWebSocketResponse | None = None
+        self.request: _ChatHubRequest
+        self.loop: bool
+        self.task: asyncio.Task
+        self.request = _ChatHubRequest(
+            conversation_signature=conversation.struct["conversationSignature"],
+            client_id=conversation.struct["clientId"],
+            conversation_id=conversation.struct["conversationId"],
+        )
+        self.cookies = cookies
+        self.proxy: str = proxy
+
+    async def ask_stream(
+        self,
+        prompt: str,
+        wss_link: str,
+        conversation_style: CONVERSATION_STYLE_TYPE = None,
+        raw: bool = False,
+        options: dict = None,
+        webpage_context: str | None = None,
+        search_result: bool = False,
+    ) -> Generator[str, None, None]:
+        """
+        Ask a question to the bot
+        """
+        timeout = aiohttp.ClientTimeout(total=30)
+        self.session = aiohttp.ClientSession(timeout=timeout)
+
+        if self.wss and not self.wss.closed:
+            await self.wss.close()
+        # Check if websocket is closed
+        self.wss = await self.session.ws_connect(
+            wss_link,
+            headers=HEADERS,
+            ssl=ssl_context,
+            proxy=self.proxy,
+            autoping=False,
+        )
+        await self._initial_handshake()
+        if self.request.invocation_id == 0:
+            # Construct a ChatHub request
+            self.request.update(
+                prompt=prompt,
+                conversation_style=conversation_style,
+                options=options,
+                webpage_context=webpage_context,
+                search_result=search_result,
+            )
+        else:
+            async with httpx.AsyncClient() as client:
+                response = await client.post(
+                    "https://sydney.bing.com/sydney/UpdateConversation/",
+                    json={
+                        "messages": [
+                            {
+                                "author": "user",
+                                "description": webpage_context,
+                                "contextType": "WebPage",
+                                "messageType": "Context",
+                            },
+                        ],
+                        "conversationId": self.request.conversation_id,
+                        "source": "cib",
+                        "traceId": _get_ran_hex(32),
+                        "participant": {"id": self.request.client_id},
+                        "conversationSignature": self.request.conversation_signature,
+                    },
+                )
+            if response.status_code != 200:
+                print(f"Status code: {response.status_code}")
+                print(response.text)
+                print(response.url)
+                raise Exception("Update web page context failed")
+            # Construct a ChatHub request
+            self.request.update(
+                prompt=prompt,
+                conversation_style=conversation_style,
+                options=options,
+            )
+        # Send request
+        await self.wss.send_str(_append_identifier(self.request.struct))
+        final = False
+        draw = False
+        resp_txt = ""
+        result_text = ""
+        resp_txt_no_link = ""
+        while not final:
+            msg = await self.wss.receive()
+            objects = msg.data.split(DELIMITER)
+            for obj in objects:
+                if obj is None or not obj:
+                    continue
+                response = json.loads(obj)
+                if response.get("type") != 2 and raw:
+                    yield False, response
+                elif response.get("type") == 1 and response["arguments"][0].get(
+                    "messages",
+                ):
+                    if not draw:
+                        if (
+                            response["arguments"][0]["messages"][0].get("messageType")
+                            == "GenerateContentQuery"
+                        ):
+                            async with ImageGenAsync("", True) as image_generator:
+                                images = await image_generator.get_images(
+                                    response["arguments"][0]["messages"][0]["text"],
+                                )
+                            for i, image in enumerate(images):
+                                resp_txt = resp_txt + f"\n![image{i}]({image})"
+                            draw = True
+                        if (
+                            response["arguments"][0]["messages"][0]["contentOrigin"]
+                            != "Apology"
+                        ) and not draw:
+                            resp_txt = result_text + response["arguments"][0][
+                                "messages"
+                            ][0]["adaptiveCards"][0]["body"][0].get("text", "")
+                            resp_txt_no_link = result_text + response["arguments"][0][
+                                "messages"
+                            ][0].get("text", "")
+                            if response["arguments"][0]["messages"][0].get(
+                                "messageType",
+                            ):
+                                resp_txt = (
+                                    resp_txt
+                                    + response["arguments"][0]["messages"][0][
+                                        "adaptiveCards"
+                                    ][0]["body"][0]["inlines"][0].get("text")
+                                    + "\n"
+                                )
+                                result_text = (
+                                    result_text
+                                    + response["arguments"][0]["messages"][0][
+                                        "adaptiveCards"
+                                    ][0]["body"][0]["inlines"][0].get("text")
+                                    + "\n"
+                                )
+                    yield False, resp_txt
+
+                elif response.get("type") == 2:
+                    if response["item"]["result"].get("error"):
+                        await self.close()
+                        raise Exception(
+                            f"{response['item']['result']['value']}: {response['item']['result']['message']}",
+                        )
+                    if draw:
+                        cache = response["item"]["messages"][1]["adaptiveCards"][0][
+                            "body"
+                        ][0]["text"]
+                        response["item"]["messages"][1]["adaptiveCards"][0]["body"][0][
+                            "text"
+                        ] = (cache + resp_txt)
+                    if (
+                        response["item"]["messages"][-1]["contentOrigin"] == "Apology"
+                        and resp_txt
+                    ):
+                        response["item"]["messages"][-1]["text"] = resp_txt_no_link
+                        response["item"]["messages"][-1]["adaptiveCards"][0]["body"][0][
+                            "text"
+                        ] = resp_txt
+                        print(
+                            "Preserved the message from being deleted",
+                            file=sys.stderr,
+                        )
+                    final = True
+                    await self.close()
+                    yield True, response
+
+    async def _initial_handshake(self) -> None:
+        await self.wss.send_str(_append_identifier({"protocol": "json", "version": 1}))
+        await self.wss.receive()
+
+    async def close(self) -> None:
+        """
+        Close the connection
+        """
+        if self.wss and not self.wss.closed:
+            await self.wss.close()
+        if self.session and not self.session.closed:
+            await self.session.close()
+
+
+class Chatbot:
+    """
+    Combines everything to make it seamless
+    """
+
+    def __init__(
+        self,
+        proxy: str | None = None,
+        cookies: list[dict] | None = None,
+    ) -> None:
+        self.proxy: str | None = proxy
+        self.chat_hub: _ChatHub = _ChatHub(
+            _Conversation(self.proxy, cookies=cookies),
+            proxy=self.proxy,
+            cookies=cookies,
+        )
+
+    @staticmethod
+    async def create(
+        proxy: str | None = None,
+        cookies: list[dict] | None = None,
+    ):
+        self = Chatbot.__new__(Chatbot)
+        self.proxy = proxy
+        self.chat_hub = _ChatHub(
+            await _Conversation.create(self.proxy, cookies=cookies),
+            proxy=self.proxy,
+            cookies=cookies,
+        )
+        return self
+
+    async def ask(
+        self,
+        prompt: str,
+        wss_link: str = "wss://sydney.bing.com/sydney/ChatHub",
+        conversation_style: CONVERSATION_STYLE_TYPE = None,
+        options: dict = None,
+        webpage_context: str | None = None,
+        search_result: bool = False,
+    ) -> dict:
+        """
+        Ask a question to the bot
+        """
+        async for final, response in self.chat_hub.ask_stream(
+            prompt=prompt,
+            conversation_style=conversation_style,
+            wss_link=wss_link,
+            options=options,
+            webpage_context=webpage_context,
+            search_result=search_result,
+        ):
+            if final:
+                return response
+        await self.chat_hub.wss.close()
+        return {}
+
+    async def ask_stream(
+        self,
+        prompt: str,
+        wss_link: str = "wss://sydney.bing.com/sydney/ChatHub",
+        conversation_style: CONVERSATION_STYLE_TYPE = None,
+        raw: bool = False,
+        options: dict = None,
+        webpage_context: str | None = None,
+        search_result: bool = False,
+    ) -> Generator[str, None, None]:
+        """
+        Ask a question to the bot
+        """
+        async for response in self.chat_hub.ask_stream(
+            prompt=prompt,
+            conversation_style=conversation_style,
+            wss_link=wss_link,
+            raw=raw,
+            options=options,
+            webpage_context=webpage_context,
+            search_result=search_result,
+        ):
+            yield response
+
+    async def close(self) -> None:
+        """
+        Close the connection
+        """
+        await self.chat_hub.close()
+
+    async def reset(self) -> None:
+        """
+        Reset the conversation
+        """
+        await self.close()
+        self.chat_hub = _ChatHub(
+            await _Conversation.create(self.proxy),
+            proxy=self.proxy,
+            cookies=self.chat_hub.cookies,
+        )
+
+
+async def _get_input_async(
+    session: PromptSession = None,
+    completer: WordCompleter = None,
+) -> str:
+    """
+    Multiline input function.
+    """
+    return await session.prompt_async(
+        completer=completer,
+        multiline=True,
+        auto_suggest=AutoSuggestFromHistory(),
+    )
+
+
+def _create_session() -> PromptSession:
+    kb = KeyBindings()
+
+    @kb.add("enter")
+    def _(event):
+        buffer_text = event.current_buffer.text
+        if buffer_text.startswith("!"):
+            event.current_buffer.validate_and_handle()
+        else:
+            event.current_buffer.insert_text("\n")
+
+    @kb.add("escape")
+    def _(event):
+        if event.current_buffer.complete_state:
+            # event.current_buffer.cancel_completion()
+            event.current_buffer.text = ""
+
+    return PromptSession(key_bindings=kb, history=InMemoryHistory())
+
+
+def _create_completer(commands: list, pattern_str: str = "$"):
+    return WordCompleter(words=commands, pattern=re.compile(pattern_str))
+
+
+async def async_main(args: argparse.Namespace) -> None:
+    """
+    Main function
+    """
+    print("Initializing...")
+    print("Enter `alt+enter` or `escape+enter` to send a message")
+    # Read and parse cookies
+    cookies = None
+    if args.cookie_file:
+        cookies = json.loads(open(args.cookie_file, encoding="utf-8").read())
+    bot = await Chatbot.create(proxy=args.proxy, cookies=cookies)
+    session = _create_session()
+    completer = _create_completer(["!help", "!exit", "!reset"])
+    initial_prompt = args.prompt
+
+    while True:
+        print("\nYou:")
+        if initial_prompt:
+            question = initial_prompt
+            print(question)
+            initial_prompt = None
+        else:
+            question = (
+                input()
+                if args.enter_once
+                else await _get_input_async(session=session, completer=completer)
+            )
+        print()
+        if question == "!exit":
+            break
+        if question == "!help":
+            print(
+                """
+            !help - Show this help message
+            !exit - Exit the program
+            !reset - Reset the conversation
+            """,
+            )
+            continue
+        if question == "!reset":
+            await bot.reset()
+            continue
+        print("Bot:")
+        if args.no_stream:
+            print(
+                (
+                    await bot.ask(
+                        prompt=question,
+                        conversation_style=args.style,
+                        wss_link=args.wss_link,
+                    )
+                )["item"]["messages"][1]["adaptiveCards"][0]["body"][0]["text"],
+            )
+        else:
+            wrote = 0
+            if args.rich:
+                md = Markdown("")
+                with Live(md, auto_refresh=False) as live:
+                    async for final, response in bot.ask_stream(
+                        prompt=question,
+                        conversation_style=args.style,
+                        wss_link=args.wss_link,
+                    ):
+                        if not final:
+                            if wrote > len(response):
+                                print(md)
+                                print(Markdown("***Bing revoked the response.***"))
+                            wrote = len(response)
+                            md = Markdown(response)
+                            live.update(md, refresh=True)
+            else:
+                async for final, response in bot.ask_stream(
+                    prompt=question,
+                    conversation_style=args.style,
+                    wss_link=args.wss_link,
+                ):
+                    if not final:
+                        if not wrote:
+                            print(response, end="", flush=True)
+                        else:
+                            print(response[wrote:], end="", flush=True)
+                        wrote = len(response)
+                print()
+    await bot.close()
+
+
+def main() -> None:
+    print(
+        """
+        EdgeGPT - A demo of reverse engineering the Bing GPT chatbot
+        Repo: github.com/acheong08/EdgeGPT
+        By: Antonio Cheong
+
+        !help for help
+
+        Type !exit to exit
+        """,
+    )
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--enter-once", action="store_true")
+    parser.add_argument("--no-stream", action="store_true")
+    parser.add_argument("--rich", action="store_true")
+    parser.add_argument(
+        "--proxy",
+        help="Proxy URL (e.g. socks5://127.0.0.1:1080)",
+        type=str,
+    )
+    parser.add_argument(
+        "--wss-link",
+        help="WSS URL (e.g. wss://sydney.bing.com/sydney/ChatHub)",
+        type=str,
+        default="wss://sydney.bing.com/sydney/ChatHub",
+    )
+    parser.add_argument(
+        "--style",
+        choices=["creative", "balanced", "precise"],
+        default="balanced",
+    )
+    parser.add_argument(
+        "--prompt",
+        type=str,
+        default="",
+        required=False,
+        help="prompt to start with",
+    )
+    parser.add_argument(
+        "--cookie-file",
+        type=str,
+        default="",
+        required=False,
+        help="path to cookie file",
+    )
+    args = parser.parse_args()
+    asyncio.run(async_main(args))
+
+
+class Cookie:
+    """
+    Convenience class for Bing Cookie files, data, and configuration. This Class
+    is updated dynamically by the Query class to allow cycling through >1
+    cookie/credentials file e.g. when daily request limits (current 200 per
+    account per day) are exceeded.
+    """
+
+    current_file_index = 0
+    dirpath = Path("./").resolve()
+    search_pattern = "bing_cookies_*.json"
+    ignore_files = set()
+
+    @classmethod
+    def fetch_default(cls, path=None):
+        from selenium import webdriver
+        from selenium.webdriver.common.by import By
+
+        driver = webdriver.Edge()
+        driver.get("https://bing.com/chat")
+        time.sleep(5)
+        xpath = '//button[@id="bnp_btn_accept"]'
+        driver.find_element(By.XPATH, xpath).click()
+        time.sleep(2)
+        xpath = '//a[@id="codexPrimaryButton"]'
+        driver.find_element(By.XPATH, xpath).click()
+        if path is None:
+            path = Path("./bing_cookies__default.json")
+            # Double underscore ensures this file is first when sorted
+        cookies = driver.get_cookies()
+        Path(path).write_text(json.dumps(cookies, indent=4), encoding="utf-8")
+        # Path again in case supplied path is: str
+        print(f"Cookies saved to: {path}")
+        driver.quit()
+
+    @classmethod
+    def files(cls):
+        """Return a sorted list of all cookie files matching .search_pattern"""
+        all_files = set(cls.dirpath.glob(cls.search_pattern))
+        return sorted(list(all_files - cls.ignore_files))
+
+    @classmethod
+    def import_data(cls):
+        """
+        Read the active cookie file and populate the following attributes:
+
+          .current_filepath
+          .current_data
+          .image_token
+        """
+        try:
+            cls.current_filepath = cls.files()[cls.current_file_index]
+        except IndexError:
+            print(
+                "> Please set Cookie.current_filepath to a valid cookie file, then run Cookie.import_data()",
+            )
+            return
+        print(f"> Importing cookies from: {cls.current_filepath.name}")
+        with open(cls.current_filepath, encoding="utf-8") as file:
+            cls.current_data = json.load(file)
+        cls.image_token = [x for x in cls.current_data if x.get("name") == "_U"]
+        cls.image_token = cls.image_token[0].get("value")
+
+    @classmethod
+    def import_next(cls):
+        """
+        Cycle through to the next cookies file. Import it. Mark the previous
+        file to be ignored for the remainder of the current session.
+        """
+        cls.ignore_files.add(cls.current_filepath)
+        if Cookie.current_file_index >= len(cls.files()):
+            Cookie.current_file_index = 0
+        Cookie.import_data()
+
+
+class Query:
+    """
+    A convenience class that wraps around EdgeGPT.Chatbot to encapsulate input,
+    config, and output all together. Relies on Cookie class for authentication
+    """
+
+    def __init__(
+        self,
+        prompt,
+        style="precise",
+        content_type="text",
+        cookie_file=0,
+        echo=True,
+        echo_prompt=False,
+    ):
+        """
+        Arguments:
+
+        prompt: Text to enter into Bing Chat
+        style: creative, balanced, or precise
+        content_type: "text" for Bing Chat; "image" for Dall-e
+        cookie_file: Path, filepath string, or index (int) to list of cookie paths
+        echo: Print something to confirm request made
+        echo_prompt: Print confirmation of the evaluated prompt
+        """
+        self.index = []
+        self.request_count = {}
+        self.image_dirpath = Path("./").resolve()
+        Cookie.import_data()
+        self.index += [self]
+        self.prompt = prompt
+        files = Cookie.files()
+        if isinstance(cookie_file, int):
+            index = cookie_file if cookie_file < len(files) else 0
+        else:
+            if not isinstance(cookie_file, (str, Path)):
+                message = "'cookie_file' must be an int, str, or Path object"
+                raise TypeError(message)
+            cookie_file = Path(cookie_file)
+            if cookie_file in files:  # Supplied filepath IS in Cookie.dirpath
+                index = files.index(cookie_file)
+            else:  # Supplied filepath is NOT in Cookie.dirpath
+                if cookie_file.is_file():
+                    Cookie.dirpath = cookie_file.parent.resolve()
+                if cookie_file.is_dir():
+                    Cookie.dirpath = cookie_file.resolve()
+                index = 0
+        Cookie.current_file_index = index
+        if content_type == "text":
+            self.style = style
+            self.log_and_send_query(echo, echo_prompt)
+        if content_type == "image":
+            self.create_image()
+
+    def log_and_send_query(self, echo, echo_prompt):
+        self.response = asyncio.run(self.send_to_bing(echo, echo_prompt))
+        name = str(Cookie.current_filepath.name)
+        if not self.request_count.get(name):
+            self.request_count[name] = 1
+        else:
+            self.request_count[name] += 1
+
+    def create_image(self):
+        image_generator = ImageGen(Cookie.image_token)
+        image_generator.save_images(
+            image_generator.get_images(self.prompt),
+            output_dir=self.image_dirpath,
+        )
+
+    async def send_to_bing(self, echo=True, echo_prompt=False):
+        """Create, submit, then close a Chatbot instance. Return the response"""
+        retries = len(Cookie.files())
+        while retries:
+            try:
+                bot = await Chatbot.create()
+                if echo_prompt:
+                    print(f"> {self.prompt=}")
+                if echo:
+                    print("> Waiting for response...")
+                if self.style.lower() not in "creative balanced precise".split():
+                    self.style = "precise"
+                response = await bot.ask(
+                    prompt=self.prompt,
+                    conversation_style=getattr(ConversationStyle, self.style),
+                    # wss_link="wss://sydney.bing.com/sydney/ChatHub"
+                    # What other values can this parameter take? It seems to be optional
+                )
+                return response
+            except KeyError:
+                print(
+                    f"> KeyError [{Cookie.current_filepath.name} may have exceeded the daily limit]",
+                )
+                Cookie.import_next()
+                retries -= 1
+            finally:
+                await bot.close()
+
+    @property
+    def output(self):
+        """The response from a completed Chatbot request"""
+        return self.response["item"]["messages"][1]["text"]
+
+    @property
+    def sources(self):
+        """The source names and details parsed from a completed Chatbot request"""
+        return self.response["item"]["messages"][1]["sourceAttributions"]
+
+    @property
+    def sources_dict(self):
+        """The source names and details as a dictionary"""
+        sources_dict = {}
+        name = "providerDisplayName"
+        url = "seeMoreUrl"
+        for source in self.sources:
+            if name in source.keys() and url in source.keys():
+                sources_dict[source[name]] = source[url]
+            else:
+                continue
+        return sources_dict
+
+    @property
+    def code(self):
+        """Extract and join any snippets of Python code in the response"""
+        code_blocks = self.output.split("```")[1:-1:2]
+        code_blocks = ["\n".join(x.splitlines()[1:]) for x in code_blocks]
+        return "\n\n".join(code_blocks)
+
+    @property
+    def languages(self):
+        """Extract all programming languages given in code blocks"""
+        code_blocks = self.output.split("```")[1:-1:2]
+        return {x.splitlines()[0] for x in code_blocks}
+
+    @property
+    def suggestions(self):
+        """Follow-on questions suggested by the Chatbot"""
+        return [
+            x["text"]
+            for x in self.response["item"]["messages"][1]["suggestedResponses"]
+        ]
+
+    def __repr__(self):
+        return f"<EdgeGPT.Query: {self.prompt}>"
+
+    def __str__(self):
+        return self.output
+
+
+class ImageQuery(Query):
+    def __init__(self, prompt, **kwargs):
+        kwargs.update({"content_type": "image"})
+        super().__init__(prompt, **kwargs)
+
+    def __repr__(self):
+        return f"<EdgeGPT.ImageQuery: {self.prompt}>"
+
+
+if __name__ == "__main__":
+    main()
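As a quick orientation to the vendored module (a sketch, not part of the commit; it assumes the anonymous conversation-create endpoint is still reachable), the cookie-less streaming entry point that the bridge relies on is `Chatbot.ask_stream`:

```python
import asyncio
from request_llm.edge_gpt_free import Chatbot

async def demo():
    # No cookies passed: the conversation is created anonymously.
    bot = await Chatbot.create(proxy=None)
    async for final, response in bot.ask_stream(
        prompt="Hello, Bing",
        conversation_style="creative",
        wss_link="wss://sydney.bing.com/sydney/ChatHub",
    ):
        if not final:
            print(response)  # the incrementally growing answer text
    await bot.close()

asyncio.run(demo())
```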
request_llm/test_llms.py CHANGED
@@ -9,69 +9,70 @@ def validate_path():
     sys.path.append(root_dir_assume)
 
 validate_path() # validate path so you can run from base directory
-
-from request_llm.bridge_moss import predict_no_ui_long_connection
-# from request_llm.bridge_jittorllms_pangualpha import predict_no_ui_long_connection
-# from request_llm.bridge_jittorllms_llama import predict_no_ui_long_connection
-
-llm_kwargs = {
-    'max_length': 512,
-    'top_p': 1,
-    'temperature': 1,
-}
-
-result = predict_no_ui_long_connection(inputs="Hello",
-                                       llm_kwargs=llm_kwargs,
-                                       history=[],
-                                       sys_prompt="")
-print('final result:', result)
-
-
-result = predict_no_ui_long_connection(inputs="what is a hero?",
-                                       llm_kwargs=llm_kwargs,
-                                       history=["hello world"],
-                                       sys_prompt="")
-print('final result:', result)
-
-result = predict_no_ui_long_connection(inputs="How should one understand a legend?",
-                                       llm_kwargs=llm_kwargs,
-                                       history=[],
-                                       sys_prompt="")
-print('final result:', result)
-
-# # print(result)
-# from multiprocessing import Process, Pipe
-# class GetGLMHandle(Process):
-#     def __init__(self):
-#         super().__init__(daemon=True)
-#         pass
-#     def run(self):
-#         # executed in the subprocess
-#         # first run: load the parameters
-#         def validate_path():
-#             import os, sys
-#             dir_name = os.path.dirname(__file__)
-#             root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..')
-#             os.chdir(root_dir_assume + '/request_llm/jittorllms')
-#             sys.path.append(root_dir_assume + '/request_llm/jittorllms')
-#         validate_path() # validate path so you can run from base directory
-
-#         jittorllms_model = None
-#         import types
-#         try:
-#             if jittorllms_model is None:
-#                 from models import get_model
-#                 # availabel_models = ["chatglm", "pangualpha", "llama", "chatrwkv"]
-#                 args_dict = {'model': 'chatrwkv'}
-#                 print('self.jittorllms_model = get_model(types.SimpleNamespace(**args_dict))')
-#                 jittorllms_model = get_model(types.SimpleNamespace(**args_dict))
-#                 print('done get model')
-#         except:
-#             # self.child.send('[Local Message] Call jittorllms fail: could not load the jittorllms parameters.')
-#             raise RuntimeError("Could not load the jittorllms parameters!")
-
-# x = GetGLMHandle()
-# x.start()
-
-# input()
+if __name__ == "__main__":
+    from request_llm.bridge_newbingfree import predict_no_ui_long_connection
+    # from request_llm.bridge_moss import predict_no_ui_long_connection
+    # from request_llm.bridge_jittorllms_pangualpha import predict_no_ui_long_connection
+    # from request_llm.bridge_jittorllms_llama import predict_no_ui_long_connection
+
+    llm_kwargs = {
+        'max_length': 512,
+        'top_p': 1,
+        'temperature': 1,
+    }
+
+    result = predict_no_ui_long_connection(inputs="Hello",
+                                           llm_kwargs=llm_kwargs,
+                                           history=[],
+                                           sys_prompt="")
+    print('final result:', result)
+
+
+    result = predict_no_ui_long_connection(inputs="what is a hero?",
+                                           llm_kwargs=llm_kwargs,
+                                           history=["hello world"],
+                                           sys_prompt="")
+    print('final result:', result)
+
+    result = predict_no_ui_long_connection(inputs="How should one understand a legend?",
+                                           llm_kwargs=llm_kwargs,
+                                           history=[],
+                                           sys_prompt="")
+    print('final result:', result)
+
+    # # print(result)
+    # from multiprocessing import Process, Pipe
+    # class GetGLMHandle(Process):
+    #     def __init__(self):
+    #         super().__init__(daemon=True)
+    #         pass
+    #     def run(self):
+    #         # executed in the subprocess
+    #         # first run: load the parameters
+    #         def validate_path():
+    #             import os, sys
+    #             dir_name = os.path.dirname(__file__)
+    #             root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..')
+    #             os.chdir(root_dir_assume + '/request_llm/jittorllms')
+    #             sys.path.append(root_dir_assume + '/request_llm/jittorllms')
+    #         validate_path() # validate path so you can run from base directory
+
+    #         jittorllms_model = None
+    #         import types
+    #         try:
+    #             if jittorllms_model is None:
+    #                 from models import get_model
+    #                 # availabel_models = ["chatglm", "pangualpha", "llama", "chatrwkv"]
+    #                 args_dict = {'model': 'chatrwkv'}
+    #                 print('self.jittorllms_model = get_model(types.SimpleNamespace(**args_dict))')
+    #                 jittorllms_model = get_model(types.SimpleNamespace(**args_dict))
+    #                 print('done get model')
+    #         except:
+    #             # self.child.send('[Local Message] Call jittorllms fail: could not load the jittorllms parameters.')
+    #             raise RuntimeError("Could not load the jittorllms parameters!")
+
+    # x = GetGLMHandle()
+    # x.start()
+
+    # input()