qingxu99 commited on
Commit
65317e3
2 Parent(s): 06fbdf4 2b96217

Merge branch 'newbing' into v3.3

Browse files
config.py CHANGED
@@ -44,8 +44,8 @@ WEB_PORT = -1
44
  MAX_RETRY = 2
45
 
46
  # OpenAI模型选择是(gpt4现在只对申请成功的人开放,体验gpt-4可以试试api2d)
47
- LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓
48
- AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm"]
49
 
50
  # 本地LLM模型如ChatGLM的执行方式 CPU/GPU
51
  LOCAL_MODEL_DEVICE = "cpu" # 可选 "cuda"
@@ -63,3 +63,6 @@ API_URL_REDIRECT = {}
63
 
64
  # 如果需要在二级路径下运行(常规情况下,不要修改!!)(需要配合修改main.py才能生效!)
65
  CUSTOM_PATH = "/"
 
 
 
 
44
  MAX_RETRY = 2
45
 
46
  # OpenAI模型选择是(gpt4现在只对申请成功的人开放,体验gpt-4可以试试api2d)
47
+ LLM_MODEL = "newbing" # 可选 ↓↓↓
48
+ AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing"]
49
 
50
  # 本地LLM模型如ChatGLM的执行方式 CPU/GPU
51
  LOCAL_MODEL_DEVICE = "cpu" # 可选 "cuda"
 
63
 
64
  # 如果需要在二级路径下运行(常规情况下,不要修改!!)(需要配合修改main.py才能生效!)
65
  CUSTOM_PATH = "/"
66
+
67
+
68
+ newbing_cookies = """your bing cookies here"""
request_llm/bridge_all.py CHANGED
@@ -19,6 +19,9 @@ from .bridge_chatgpt import predict as chatgpt_ui
19
  from .bridge_chatglm import predict_no_ui_long_connection as chatglm_noui
20
  from .bridge_chatglm import predict as chatglm_ui
21
 
 
 
 
22
  # from .bridge_tgui import predict_no_ui_long_connection as tgui_noui
23
  # from .bridge_tgui import predict as tgui_ui
24
 
@@ -116,7 +119,15 @@ model_info = {
116
  "tokenizer": tokenizer_gpt35,
117
  "token_cnt": get_token_num_gpt35,
118
  },
119
-
 
 
 
 
 
 
 
 
120
  }
121
 
122
 
 
19
  from .bridge_chatglm import predict_no_ui_long_connection as chatglm_noui
20
  from .bridge_chatglm import predict as chatglm_ui
21
 
22
+ from .bridge_newbing import predict_no_ui_long_connection as newbing_noui
23
+ from .bridge_newbing import predict as newbing_ui
24
+
25
  # from .bridge_tgui import predict_no_ui_long_connection as tgui_noui
26
  # from .bridge_tgui import predict as tgui_ui
27
 
 
119
  "tokenizer": tokenizer_gpt35,
120
  "token_cnt": get_token_num_gpt35,
121
  },
122
+ # newbing
123
+ "newbing": {
124
+ "fn_with_ui": newbing_ui,
125
+ "fn_without_ui": newbing_noui,
126
+ "endpoint": None,
127
+ "max_token": 4096,
128
+ "tokenizer": tokenizer_gpt35,
129
+ "token_cnt": get_token_num_gpt35,
130
+ },
131
  }
132
 
133
 
request_llm/bridge_newbing.py ADDED
@@ -0,0 +1,608 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Bridge to the NewBing (Bing Chat) backend — adapted from EdgeGPT's main.py.
3
+ """
4
+
5
+ from transformers import AutoModel, AutoTokenizer
6
+ import time
7
+ import importlib
8
+ from toolbox import update_ui, get_conf
9
+ from multiprocessing import Process, Pipe
10
+ import argparse
11
+ import asyncio
12
+ import json
13
+ import os
14
+ import random
15
+ import re
16
+ import ssl
17
+ import sys
18
+ import uuid
19
+ from enum import Enum
20
+ from pathlib import Path
21
+ from typing import Generator
22
+ from typing import Literal
23
+ from typing import Optional
24
+ from typing import Union
25
+ import certifi
26
+ import httpx
27
+ import websockets.client as websockets
28
+ from prompt_toolkit import PromptSession
29
+ from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
30
+ from prompt_toolkit.completion import WordCompleter
31
+ from prompt_toolkit.history import InMemoryHistory
32
+ from prompt_toolkit.key_binding import KeyBindings
33
+ from rich.live import Live
34
+ from rich.markdown import Markdown
35
+
36
+ DELIMITER = "\x1e"
37
+
38
+
39
+ # Generate random IP between range 13.104.0.0/14
40
+ FORWARDED_IP = (
41
+ f"13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}"
42
+ )
43
+
44
+ HEADERS = {
45
+ "accept": "application/json",
46
+ "accept-language": "en-US,en;q=0.9",
47
+ "content-type": "application/json",
48
+ "sec-ch-ua": '"Not_A Brand";v="99", "Microsoft Edge";v="110", "Chromium";v="110"',
49
+ "sec-ch-ua-arch": '"x86"',
50
+ "sec-ch-ua-bitness": '"64"',
51
+ "sec-ch-ua-full-version": '"109.0.1518.78"',
52
+ "sec-ch-ua-full-version-list": '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
53
+ "sec-ch-ua-mobile": "?0",
54
+ "sec-ch-ua-model": "",
55
+ "sec-ch-ua-platform": '"Windows"',
56
+ "sec-ch-ua-platform-version": '"15.0.0"',
57
+ "sec-fetch-dest": "empty",
58
+ "sec-fetch-mode": "cors",
59
+ "sec-fetch-site": "same-origin",
60
+ "x-ms-client-request-id": str(uuid.uuid4()),
61
+ "x-ms-useragent": "azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32",
62
+ "Referer": "https://www.bing.com/search?q=Bing+AI&showconv=1&FORM=hpcodx",
63
+ "Referrer-Policy": "origin-when-cross-origin",
64
+ "x-forwarded-for": FORWARDED_IP,
65
+ }
66
+
67
+ HEADERS_INIT_CONVER = {
68
+ "authority": "edgeservices.bing.com",
69
+ "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
70
+ "accept-language": "en-US,en;q=0.9",
71
+ "cache-control": "max-age=0",
72
+ "sec-ch-ua": '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
73
+ "sec-ch-ua-arch": '"x86"',
74
+ "sec-ch-ua-bitness": '"64"',
75
+ "sec-ch-ua-full-version": '"110.0.1587.69"',
76
+ "sec-ch-ua-full-version-list": '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
77
+ "sec-ch-ua-mobile": "?0",
78
+ "sec-ch-ua-model": '""',
79
+ "sec-ch-ua-platform": '"Windows"',
80
+ "sec-ch-ua-platform-version": '"15.0.0"',
81
+ "sec-fetch-dest": "document",
82
+ "sec-fetch-mode": "navigate",
83
+ "sec-fetch-site": "none",
84
+ "sec-fetch-user": "?1",
85
+ "upgrade-insecure-requests": "1",
86
+ "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69",
87
+ "x-edge-shopping-flag": "1",
88
+ "x-forwarded-for": FORWARDED_IP,
89
+ }
90
+
91
+ ssl_context = ssl.create_default_context()
92
+ ssl_context.load_verify_locations(certifi.where())
93
+
94
+
95
class NotAllowedToAccess(Exception):
    """Raised when the Bing account is not authorized to use the chat API."""
97
+
98
+
99
class ConversationStyle(Enum):
    """Bing chat answer styles, encoded as the optionsSets flags Bing expects."""

    creative = "h3imaginative,clgalileo,gencontentv3"
    balanced = "galileo"
    precise = "h3precise,clgalileo"


# A style may be passed as an enum member, as its lowercase name, or omitted.
CONVERSATION_STYLE_TYPE = Optional[
    Union[ConversationStyle, Literal["creative", "balanced", "precise"]]
]
108
+
109
+
110
+ def _append_identifier(msg: dict) -> str:
111
+ """
112
+ Appends special character to end of message to identify end of message
113
+ """
114
+ # Convert dict to json string
115
+ return json.dumps(msg) + DELIMITER
116
+
117
+
118
+ def _get_ran_hex(length: int = 32) -> str:
119
+ """
120
+ Returns random hex string
121
+ """
122
+ return "".join(random.choice("0123456789abcdef") for _ in range(length))
123
+
124
+
125
class _ChatHubRequest:
    """Builds the JSON payload sent to Bing's ChatHub websocket endpoint."""

    def __init__(
        self,
        conversation_signature: str,
        client_id: str,
        conversation_id: str,
        invocation_id: int = 0,
    ) -> None:
        # `struct` holds the most recently built request payload.
        self.struct: dict = {}
        self.client_id: str = client_id
        self.conversation_id: str = conversation_id
        self.conversation_signature: str = conversation_signature
        self.invocation_id: int = invocation_id

    def update(
        self,
        prompt: str,
        conversation_style: "CONVERSATION_STYLE_TYPE",
        options: "list | None" = None,
    ) -> None:
        """Rebuild self.struct for the next turn of the conversation."""
        if options is None:
            options = [
                "deepleo",
                "enable_debug_commands",
                "disable_emoji_spoken_text",
                "enablemm",
            ]
        if conversation_style:
            # Accept either a ConversationStyle member or its string name.
            if not isinstance(conversation_style, ConversationStyle):
                conversation_style = getattr(ConversationStyle, conversation_style)
            options = [
                "nlu_direct_response_filter",
                "deepleo",
                "disable_emoji_spoken_text",
                "responsible_ai_policy_235",
                "enablemm",
                conversation_style.value,
                "dtappid",
                "cricinfo",
                "cricinfov2",
                "dv3sugg",
            ]
        self.struct = {
            "arguments": [
                {
                    "source": "cib",
                    "optionsSets": options,
                    "sliceIds": [
                        "222dtappid",
                        "225cricinfo",
                        "224locals0",
                    ],
                    "traceId": _get_ran_hex(32),
                    # The first message of a session must announce itself.
                    "isStartOfSession": self.invocation_id == 0,
                    "message": {
                        "author": "user",
                        "inputMethod": "Keyboard",
                        "text": prompt,
                        "messageType": "Chat",
                    },
                    "conversationSignature": self.conversation_signature,
                    "participant": {
                        "id": self.client_id,
                    },
                    "conversationId": self.conversation_id,
                },
            ],
            "invocationId": str(self.invocation_id),
            "target": "chat",
            "type": 4,
        }
        self.invocation_id += 1
205
+
206
+
207
class _Conversation:
    """Creates a new Bing conversation and stores its identifiers.

    The constructor performs a blocking HTTP request; on success,
    self.struct carries conversationId / clientId / conversationSignature.
    """

    def __init__(
        self,
        cookies: dict,
        proxy: "str | None" = None,
    ) -> None:
        self.struct: dict = {
            "conversationId": None,
            "clientId": None,
            "conversationSignature": None,
            "result": {"value": "Success", "message": None},
        }
        self.proxy = proxy
        # Fall back to the conventional proxy environment variables.
        proxy = (
            proxy
            or os.environ.get("all_proxy")
            or os.environ.get("ALL_PROXY")
            or os.environ.get("https_proxy")
            or os.environ.get("HTTPS_PROXY")
            or None
        )
        # httpx does not understand socks5h; downgrade the scheme.
        if proxy is not None and proxy.startswith("socks5h://"):
            proxy = "socks5://" + proxy[len("socks5h://") :]
        self.session = httpx.Client(
            proxies=proxy,
            timeout=30,
            headers=HEADERS_INIT_CONVER,
        )
        # NOTE(review): cookies is iterated as a sequence of {"name", "value"}
        # dicts (browser-exported format), despite the `dict` annotation.
        for cookie in cookies:
            self.session.cookies.set(cookie["name"], cookie["value"])

        # Ask Bing (or a user-supplied mirror) to open a conversation.
        response = self.session.get(
            url=os.environ.get("BING_PROXY_URL")
            or "https://edgeservices.bing.com/edgesvc/turing/conversation/create",
        )
        if response.status_code != 200:
            # Retry once through a public mirror before giving up.
            response = self.session.get(
                "https://edge.churchless.tech/edgesvc/turing/conversation/create",
            )
        if response.status_code != 200:
            print(f"Status code: {response.status_code}")
            print(response.text)
            print(response.url)
            raise Exception("Authentication failed")
        try:
            self.struct = response.json()
        except (json.decoder.JSONDecodeError, NotAllowedToAccess) as exc:
            raise Exception(
                "Authentication failed. You have not been accepted into the beta.",
            ) from exc
        if self.struct["result"]["value"] == "UnauthorizedRequest":
            raise NotAllowedToAccess(self.struct["result"]["message"])
264
+
265
+
266
class _ChatHub:
    """Drives the websocket chat protocol for one conversation."""

    def __init__(self, conversation: "_Conversation") -> None:
        self.wss = None  # active websocket connection, if any
        self.loop: bool
        self.task: asyncio.Task
        self.request = _ChatHubRequest(
            conversation_signature=conversation.struct["conversationSignature"],
            client_id=conversation.struct["clientId"],
            conversation_id=conversation.struct["conversationId"],
        )

    async def ask_stream(
        self,
        prompt: str,
        wss_link: str,
        conversation_style: "CONVERSATION_STYLE_TYPE" = None,
        raw: bool = False,
        options: dict = None,
    ):
        """Yield (final, payload) tuples as Bing streams its answer."""
        # Drop any previous connection before opening a fresh one.
        if self.wss and not self.wss.closed:
            await self.wss.close()
        self.wss = await websockets.connect(
            wss_link,
            extra_headers=HEADERS,
            max_size=None,
            ssl=ssl_context,
        )
        await self._initial_handshake()
        # Build the request payload for this turn.
        self.request.update(
            prompt=prompt,
            conversation_style=conversation_style,
            options=options,
        )
        await self.wss.send(_append_identifier(self.request.struct))
        final = False
        while not final:
            # One websocket frame may carry several DELIMITER-separated objects.
            objects = str(await self.wss.recv()).split(DELIMITER)
            for obj in objects:
                if not obj:
                    continue
                response = json.loads(obj)
                if response.get("type") != 2 and raw:
                    yield False, response
                elif response.get("type") == 1 and response["arguments"][0].get(
                    "messages",
                ):
                    # Type-1 frames carry incremental message text.
                    resp_txt = response["arguments"][0]["messages"][0][
                        "adaptiveCards"
                    ][0]["body"][0].get("text")
                    yield False, resp_txt
                elif response.get("type") == 2:
                    # Type-2 frame signals the end of the answer.
                    final = True
                    yield True, response

    async def _initial_handshake(self) -> None:
        # Negotiate the SignalR JSON protocol before the first message.
        await self.wss.send(_append_identifier({"protocol": "json", "version": 1}))
        await self.wss.recv()

    async def close(self) -> None:
        """Close the websocket connection if it is still open."""
        if self.wss and not self.wss.closed:
            await self.wss.close()
341
+
342
+
343
class Chatbot:
    """High-level wrapper tying conversation setup and chat together."""

    def __init__(
        self,
        cookies: dict = None,
        proxy: "str | None" = None,
        cookie_path: str = None,
    ) -> None:
        if cookies is None:
            cookies = {}
        if cookie_path is not None:
            # A cookie file on disk takes precedence over the dict argument.
            try:
                with open(cookie_path, encoding="utf-8") as f:
                    self.cookies = json.load(f)
            except FileNotFoundError as exc:
                raise FileNotFoundError("Cookie file not found") from exc
        else:
            self.cookies = cookies
        self.proxy = proxy
        self.chat_hub = _ChatHub(
            _Conversation(self.cookies, self.proxy),
        )

    async def ask(
        self,
        prompt: str,
        wss_link: str = "wss://sydney.bing.com/sydney/ChatHub",
        conversation_style: "CONVERSATION_STYLE_TYPE" = None,
        options: dict = None,
    ) -> dict:
        """Ask one question and return only the final response object."""
        async for final, response in self.chat_hub.ask_stream(
            prompt=prompt,
            conversation_style=conversation_style,
            wss_link=wss_link,
            options=options,
        ):
            if final:
                return response
        await self.chat_hub.wss.close()
        return None

    async def ask_stream(
        self,
        prompt: str,
        wss_link: str = "wss://sydney.bing.com/sydney/ChatHub",
        conversation_style: "CONVERSATION_STYLE_TYPE" = None,
        raw: bool = False,
        options: dict = None,
    ):
        """Ask a question, streaming (final, payload) tuples as they arrive."""
        async for response in self.chat_hub.ask_stream(
            prompt=prompt,
            conversation_style=conversation_style,
            wss_link=wss_link,
            raw=raw,
            options=options,
        ):
            yield response

    async def close(self) -> None:
        """Close the underlying websocket connection."""
        await self.chat_hub.close()

    async def reset(self) -> None:
        """Start a brand-new conversation, discarding all context."""
        await self.close()
        self.chat_hub = _ChatHub(_Conversation(self.cookies))
422
+
423
+
424
async def _get_input_async(
    session: "PromptSession" = None,
    completer: "WordCompleter" = None,
) -> str:
    """Read (possibly multiline) user input asynchronously via prompt_toolkit."""
    return await session.prompt_async(
        completer=completer,
        multiline=True,
        auto_suggest=AutoSuggestFromHistory(),
    )
436
+
437
+
438
def _create_session() -> "PromptSession":
    """Build a PromptSession where Enter inserts a newline unless the buffer
    starts with "!", and Escape clears an in-progress completion."""
    bindings = KeyBindings()

    @bindings.add("enter")
    def _(event):
        text = event.current_buffer.text
        if text.startswith("!"):
            # "!"-prefixed lines are commands: submit immediately.
            event.current_buffer.validate_and_handle()
        else:
            event.current_buffer.insert_text("\n")

    @bindings.add("escape")
    def _(event):
        if event.current_buffer.complete_state:
            # event.current_buffer.cancel_completion()
            event.current_buffer.text = ""

    return PromptSession(key_bindings=bindings, history=InMemoryHistory())
456
+
457
+
458
def _create_completer(commands: list, pattern_str: str = "$"):
    """Return a WordCompleter over *commands*, matched by *pattern_str* regex."""
    return WordCompleter(words=commands, pattern=re.compile(pattern_str))
460
+
461
+
462
+ load_message = ""
463
+
464
+ #################################################################################
465
+ #################################################################################
466
+ #################################################################################
467
+ #################################################################################
468
+ #################################################################################
469
+ #################################################################################
470
class GetNewBingHandle(Process):
    """Runs the NewBing Chatbot in a daemon subprocess and proxies queries
    to it over a multiprocessing Pipe.

    NOTE(review): the attributes are named `chatglm_*` for historical
    reasons (copied from the ChatGLM bridge) but actually hold a NewBing
    `Chatbot` instance.
    """

    def __init__(self):
        super().__init__(daemon=True)
        self.parent, self.child = Pipe()  # parent end stays in this process
        self.chatglm_model = None         # lazily-created Chatbot (child side)
        self.chatglm_tokenizer = None     # unused; kept for interface parity
        self.info = ""
        self.success = True
        self.check_dependency()
        self.start()

    def check_dependency(self):
        """Verify the optional NewBing requirements are importable."""
        try:
            import rich  # representative of requirements_newbing.txt
            self.info = "依赖检测通过"
            self.success = True
        except ImportError:
            # was a bare `except:`; only a failed import should trip this branch
            self.info = "缺少的依赖,如果要使用Newbing,除了基础的pip依赖以外,您还需要运行`pip install -r request_llm/requirements_newbing.txt`安装Newbing的依赖。"
            self.success = False

    def ready(self):
        """True once the child process has built its Chatbot."""
        return self.chatglm_model is not None

    async def async_run(self, question):
        """Stream one answer from Bing, forwarding partial text to the pipe."""
        async for final, response in self.chatglm_model.ask_stream(
            prompt=question,
            conversation_style="balanced",  # ["creative", "balanced", "precise"]
            wss_link="wss://sydney.bing.com/sydney/ChatHub",
        ):
            if not final:
                self.child.send(response)
                print(response)

    def run(self):
        """Child-process entry point: build the Chatbot, then serve queries."""
        # First run: load credentials and create the Chatbot (up to 3 retries).
        retry = 0
        while True:
            try:
                if self.chatglm_model is None:
                    proxies, = get_conf('proxies')
                    newbing_cookies, = get_conf('newbing_cookies')
                    cookies = json.loads(newbing_cookies)
                    self.chatglm_model = Chatbot(proxy=proxies['https'], cookies=cookies)
                    break
                else:
                    break
            except Exception:
                # was a bare `except:`, which would also swallow SystemExit
                retry += 1
                if retry > 3:
                    self.child.send('[Local Message] 不能加载Newbing组件。')
                    raise RuntimeError("不能加载Newbing组件。")

        # Serve queries forever; each answer ends with a '[Finish]' sentinel.
        while True:
            kwargs = self.child.recv()
            try:
                asyncio.run(self.async_run(question=kwargs['query']))
            except Exception:
                # was a bare `except:`; report failure but keep serving
                self.child.send('[Local Message] Newbing失败.')
            self.child.send('[Finish]')

    def stream_chat(self, **kwargs):
        """Parent-side generator: send a query, then yield partial responses
        until the child signals '[Finish]'."""
        self.parent.send(kwargs)
        while True:
            res = self.parent.recv()
            if res != '[Finish]':
                yield res
            else:
                break
        return
540
+
541
+ global glm_handle
542
+ glm_handle = None
543
+ #################################################################################
544
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
    """
    Multithreaded entry point; see request_llm/bridge_all.py for the contract.

    observe_window: optional list used as a cross-thread mailbox —
    observe_window[0] receives the partial response; observe_window[1]
    (if present) holds the caller's last-seen timestamp for the watchdog.
    `history` is only read, never mutated, so the mutable default is safe.
    """
    global glm_handle
    if glm_handle is None:
        glm_handle = GetNewBingHandle()
        # BUGFIX: the original dereferenced observe_window even when it
        # was None (the declared default), raising TypeError.
        if observe_window is not None:
            observe_window[0] = load_message + "\n\n" + glm_handle.info
        if not glm_handle.success:
            error = glm_handle.info
            glm_handle = None
            raise RuntimeError(error)

    # NewBing has no sys_prompt hook, so fold the prompt into the history.
    history_feedin = []
    history_feedin.append(["What can I do?", sys_prompt])
    for i in range(len(history)//2):
        history_feedin.append([history[2*i], history[2*i+1]])

    watch_dog_patience = 5  # watchdog patience in seconds
    response = ""
    for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
        if observe_window is not None:
            observe_window[0] = response
            if len(observe_window) >= 2:
                # Abort if the caller stopped refreshing its timestamp.
                if (time.time()-observe_window[1]) > watch_dog_patience:
                    raise RuntimeError("程序终止。")
    return response
572
+
573
+
574
+
575
def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
    """
    Single-threaded (UI) entry point; see request_llm/bridge_all.py.
    Yields UI updates while the NewBing response streams in.
    `history` is mutated (extended) in place, as callers expect.
    """
    chatbot.append((inputs, ""))

    global glm_handle
    if glm_handle is None:
        glm_handle = GetNewBingHandle()
        chatbot[-1] = (inputs, load_message + "\n\n" + glm_handle.info)
        yield from update_ui(chatbot=chatbot, history=[])
        if not glm_handle.success:
            glm_handle = None
            return

    if additional_fn is not None:
        import core_functional
        importlib.reload(core_functional)    # hot-reload the prompt library
        core_functional = core_functional.get_core_functions()
        if "PreProcess" in core_functional[additional_fn]:
            # Optional pre-processing hook for this function preset.
            inputs = core_functional[additional_fn]["PreProcess"](inputs)
        inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]

    # NewBing has no system-prompt hook, so fold it into the history.
    history_feedin = []
    history_feedin.append(["What can I do?", system_prompt])
    for i in range(len(history)//2):
        history_feedin.append([history[2*i], history[2*i+1]])

    # BUGFIX: initialize `response` so an empty stream cannot raise
    # NameError at the history.extend() below.
    response = ""
    for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
        chatbot[-1] = (inputs, response)
        yield from update_ui(chatbot=chatbot, history=history)

    history.extend([inputs, response])
    yield from update_ui(chatbot=chatbot, history=history)
request_llm/requirements_newbing.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ BingImageCreator
2
+ certifi
3
+ httpx
4
+ prompt_toolkit
5
+ requests
6
+ rich
7
+ websockets
8
+ httpx[socks]