qingxu99 committed
Commit 518385d
1 Parent(s): 4d1eea7

add newbing, testing

config.py CHANGED
@@ -65,6 +65,7 @@ API_URL_REDIRECT = {}
 CUSTOM_PATH = "/"
 
 # If you want to use NewBing, put your long NewBing cookie string here
-newbing_cookies = """
+NEWBING_STYLE = "creative"  # ["creative", "balanced", "precise"]
+NEWBING_COOKIES = """
 your bing cookies here
 """
request_llm/bridge_all.py CHANGED
@@ -51,6 +51,7 @@ class LazyloadTiktoken(object):
 API_URL_REDIRECT, = get_conf("API_URL_REDIRECT")
 openai_endpoint = "https://api.openai.com/v1/chat/completions"
 api2d_endpoint = "https://openai.api2d.net/v1/chat/completions"
+newbing_endpoint = "wss://sydney.bing.com/sydney/ChatHub"
 # Backward compatibility with the old config format
 try:
     API_URL, = get_conf("API_URL")
@@ -62,6 +63,7 @@ except:
 # New-style config
 if openai_endpoint in API_URL_REDIRECT: openai_endpoint = API_URL_REDIRECT[openai_endpoint]
 if api2d_endpoint in API_URL_REDIRECT: api2d_endpoint = API_URL_REDIRECT[api2d_endpoint]
+if newbing_endpoint in API_URL_REDIRECT: newbing_endpoint = API_URL_REDIRECT[newbing_endpoint]
 
 
 # Get the tokenizer
@@ -123,7 +125,7 @@ model_info = {
     "newbing": {
         "fn_with_ui": newbing_ui,
         "fn_without_ui": newbing_noui,
-        "endpoint": None,
+        "endpoint": newbing_endpoint,
         "max_token": 4096,
         "tokenizer": tokenizer_gpt35,
         "token_cnt": get_token_num_gpt35,
request_llm/bridge_newbing.py CHANGED
@@ -1,5 +1,8 @@
 """
-Main.py
+========================================================================
+Part 1: taken from EdgeGPT.py
+https://github.com/acheong08/EdgeGPT
+========================================================================
 """
 
 from transformers import AutoModel, AutoTokenizer
@@ -273,6 +276,7 @@ class _ChatHub:
         self.request: _ChatHubRequest
         self.loop: bool
         self.task: asyncio.Task
+        print(conversation.struct)
         self.request = _ChatHubRequest(
             conversation_signature=conversation.struct["conversationSignature"],
             client_id=conversation.struct["clientId"],
@@ -461,20 +465,20 @@ def _create_completer(commands: list, pattern_str: str = "$"):
 
 load_message = ""
 
-#################################################################################
-#################################################################################
-#################################################################################
-#################################################################################
-#################################################################################
-#################################################################################
+"""
+========================================================================
+Part 2: the subprocess worker
+========================================================================
+"""
+
 class GetNewBingHandle(Process):
     def __init__(self):
         super().__init__(daemon=True)
         self.parent, self.child = Pipe()
-        self.chatglm_model = None
-        self.chatglm_tokenizer = None
+        self.newbing_model = None
         self.info = ""
         self.success = True
+        self.local_history = []
         self.check_dependency()
         self.start()
 
@@ -488,28 +492,36 @@ class GetNewBingHandle(Process):
             self.success = False
 
     def ready(self):
-        return self.chatglm_model is not None
+        return self.newbing_model is not None
 
-    async def async_run(self, question):
-        async for final, response in self.chatglm_model.ask_stream(
+    async def async_run(self, question, history):
+        # Read the configuration
+        NEWBING_STYLE, = get_conf('NEWBING_STYLE')
+        from request_llm.bridge_all import model_info
+        endpoint = model_info['newbing']['endpoint']
+
+        # Ask the question
+        self.local_history.append(question)
+        async for final, response in self.newbing_model.ask_stream(
             prompt=question,
-            conversation_style="balanced",  # ["creative", "balanced", "precise"]
-            wss_link="wss://sydney.bing.com/sydney/ChatHub",
+            conversation_style=NEWBING_STYLE,  # ["creative", "balanced", "precise"]
+            wss_link=endpoint,                 # "wss://sydney.bing.com/sydney/ChatHub"
         ):
             if not final:
-                self.child.send(response)
+                self.child.send(str(response))
                 print(response)
 
     def run(self):
         # On the first run, load the parameters
         retry = 0
+        self.local_history = []
         while True:
             try:
-                if self.chatglm_model is None:
+                if self.newbing_model is None:
                     proxies, = get_conf('proxies')
-                    newbing_cookies, = get_conf('newbing_cookies')
-                    cookies = json.loads(newbing_cookies)
-                    self.chatglm_model = Chatbot(proxy=proxies['https'], cookies=cookies)
+                    NEWBING_COOKIES, = get_conf('NEWBING_COOKIES')
+                    cookies = json.loads(NEWBING_COOKIES)
+                    self.newbing_model = Chatbot(proxy=proxies['https'], cookies=cookies)
                     break
                 else:
                     break
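This hunk relies on a pattern used throughout the repo: a daemon multiprocessing.Process drives an async generator with asyncio.run and streams partial results to the parent over a Pipe. A stripped-down sketch of that mechanism, independent of the EdgeGPT specifics (Worker and fake_stream are illustrative stand-ins, not code from this commit):

```python
import asyncio
from multiprocessing import Pipe, Process

async def fake_stream(prompt):
    # Stand-in for Chatbot.ask_stream: yields (final, text) pairs.
    text = ""
    for word in ["thinking", "about", prompt]:
        text += word + " "
        yield False, text          # partial update
    yield True, text               # final answer

class Worker(Process):
    def __init__(self):
        super().__init__(daemon=True)
        self.parent, self.child = Pipe()
        self.start()

    async def async_run(self, question):
        async for final, response in fake_stream(question):
            if not final:
                self.child.send(response)   # stream partials to the parent

    def run(self):  # executes in the child process
        while True:
            kwargs = self.child.recv()      # block until a request arrives
            asyncio.run(self.async_run(kwargs['query']))
            self.child.send('[Finish]')     # sentinel: this turn is done

if __name__ == '__main__':
    w = Worker()
    w.parent.send({'query': 'newbing'})
    while (msg := w.parent.recv()) != '[Finish]':
        print(msg)
```

Running asyncio inside a separate process keeps the websocket event loop out of the Gradio main process, at the cost of serializing every partial response through the pipe, hence the str(response) coercion added above.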
@@ -517,13 +529,14 @@ class GetNewBingHandle(Process):
                 retry += 1
                 if retry > 3:
                     self.child.send('[Local Message] Cannot load the Newbing component.')
+                    self.success = False
                     raise RuntimeError("Cannot load the Newbing component.")
 
         # Enter the task-waiting loop
         while True:
             kwargs = self.child.recv()
             try:
-                asyncio.run(self.async_run(question=kwargs['query']))
+                asyncio.run(self.async_run(question=kwargs['query'], history=kwargs['history']))
             except:
                 self.child.send('[Local Message] Newbing failed.')
             self.child.send('[Finish]')
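stream_chat, called from the entry points below, is not shown in this diff. Given the '[Finish]' sentinel sent above, the parent side plausibly looks like this sketch (a guess at its structure under this commit's pipe protocol, not the file's actual code):

```python
from multiprocessing.connection import Connection

def stream_chat(parent: Connection, **kwargs):
    # Hypothetical parent-side counterpart of the worker loop above: forward
    # the request, then yield partial responses until the sentinel arrives.
    parent.send(kwargs)                  # e.g. {'query': ..., 'history': ...}
    while True:
        res = parent.recv()
        if res == '[Finish]':
            break                        # worker signalled end of turn
        yield res                        # partial response for the UI/watchdog
```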
@@ -538,24 +551,30 @@ class GetNewBingHandle(Process):
                 break
         return
 
-global glm_handle
-glm_handle = None
-#################################################################################
+
+"""
+========================================================================
+Part 3: unified call interface for the main process
+========================================================================
+"""
+global newbing_handle
+newbing_handle = None
+
 def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
     """
     Multithreaded method.
     See request_llm/bridge_all.py for documentation of this function.
     """
-    global glm_handle
-    if glm_handle is None:
-        glm_handle = GetNewBingHandle()
-        observe_window[0] = load_message + "\n\n" + glm_handle.info
-        if not glm_handle.success:
-            error = glm_handle.info
-            glm_handle = None
+    global newbing_handle
+    if newbing_handle is None or (not newbing_handle.success):
+        newbing_handle = GetNewBingHandle()
+        observe_window[0] = load_message + "\n\n" + newbing_handle.info
+        if not newbing_handle.success:
+            error = newbing_handle.info
+            newbing_handle = None
             raise RuntimeError(error)
 
-    # chatglm has no sys_prompt interface, so the prompt is folded into history
+    # There is no sys_prompt interface, so the prompt is folded into history
     history_feedin = []
     history_feedin.append(["What can I do?", sys_prompt])
     for i in range(len(history)//2):
@@ -563,7 +582,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
 
     watch_dog_patience = 5  # watchdog patience; 5 seconds is enough
     response = ""
-    for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
+    for response in newbing_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
         observe_window[0] = response
         if len(observe_window) >= 2:
             if (time.time()-observe_window[1]) > watch_dog_patience:
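The watchdog works because the caller keeps refreshing a timestamp in observe_window[1] while it still wants the answer; once that heartbeat goes stale for longer than watch_dog_patience, the loop aborts the turn. A minimal sketch of the contract (consume_stream and the error message are illustrative, only observe_window comes from the diff):

```python
import time

watch_dog_patience = 5   # seconds of caller silence tolerated

def consume_stream(chunks, observe_window):
    # `chunks` stands in for newbing_handle.stream_chat(...); observe_window
    # is [latest_response, last_heartbeat_timestamp], shared with the caller.
    response = ""
    for response in chunks:
        observe_window[0] = response                       # publish progress
        if len(observe_window) >= 2:
            if time.time() - observe_window[1] > watch_dog_patience:
                # The caller stopped refreshing its heartbeat; abandon the turn.
                raise RuntimeError("watchdog timeout")
    return response

# Caller side: keep observe_window[1] fresh while still waiting.
observe_window = ["", time.time()]
print(consume_stream(iter(["partial", "partial answer"]), observe_window))
```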
@@ -579,13 +598,13 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
     """
     chatbot.append((inputs, ""))
 
-    global glm_handle
-    if glm_handle is None:
-        glm_handle = GetNewBingHandle()
-        chatbot[-1] = (inputs, load_message + "\n\n" + glm_handle.info)
+    global newbing_handle
+    if newbing_handle is None or (not newbing_handle.success):
+        newbing_handle = GetNewBingHandle()
+        chatbot[-1] = (inputs, load_message + "\n\n" + newbing_handle.info)
         yield from update_ui(chatbot=chatbot, history=[])
-        if not glm_handle.success:
-            glm_handle = None
+        if not newbing_handle.success:
+            newbing_handle = None
             return
 
     if additional_fn is not None:
@@ -600,7 +619,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
     for i in range(len(history)//2):
         history_feedin.append([history[2*i], history[2*i+1]])
 
-    for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
+    for response in newbing_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
         chatbot[-1] = (inputs, response)
         yield from update_ui(chatbot=chatbot, history=history)
 
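Both entry points rebuild the conversation history the same way: the flat history list alternates user and assistant turns, the loop regroups it into [user, assistant] pairs, and the system prompt rides along as a fake first exchange. A small worked example of that regrouping:

```python
sys_prompt = "You are a helpful assistant."
history = ["q1", "a1", "q2", "a2"]          # flat: user/assistant alternating

history_feedin = [["What can I do?", sys_prompt]]   # fake turn carrying sys_prompt
for i in range(len(history)//2):
    history_feedin.append([history[2*i], history[2*i+1]])

print(history_feedin)
# [['What can I do?', 'You are a helpful assistant.'], ['q1', 'a1'], ['q2', 'a2']]
```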