artistypl committed on
Commit ac65081
1 Parent(s): 0d01495

Update chatllm.py


Add logic for calling the chatGPT API

Files changed (1)
  1. chatllm.py +28 -26
chatllm.py CHANGED
@@ -2,6 +2,7 @@ import os
 from typing import Dict, List, Optional, Tuple, Union
 
 import torch
+import requests
 from langchain.llms.base import LLM
 from langchain.llms.utils import enforce_stop_tokens
 from transformers import AutoModel, AutoTokenizer
@@ -51,7 +52,7 @@ def auto_configure_device_map(num_gpus: int) -> Dict[str, int]:
 
 
 class ChatLLM(LLM):
-    max_token: int = 10000
+    max_token: int = 4000
     temperature: float = 0.1
     top_p = 0.9
    history = []
@@ -69,38 +70,39 @@ class ChatLLM(LLM):
               prompt: str,
               stop: Optional[List[str]] = None) -> str:
 
-        if self.model == 'Minimax':
-            import requests
+        if self.model == 'ChatGPT':
 
-            group_id = os.getenv('group_id')
-            api_key = os.getenv('api_key')
-
-            url = f'https://api.minimax.chat/v1/text/chatcompletion?GroupId={group_id}'
+            OPENAI_API_KEY = os.getenv('openai_api_key')
+            OPENAI_URL = "https://api.openai.com/v1/chat/completions"
+
             headers = {
-                "Authorization": f"Bearer {api_key}",
+                "Authorization": f"Bearer {OPENAI_API_KEY}",
                 "Content-Type": "application/json"
             }
-            request_body = {
-                "model": "abab5-chat",
-                "tokens_to_generate": 512,
-                'messages': []
+
+            # Append past question/answer turns so the multi-turn conversation stays coherent
+            messages = [{"role": "system", "content": "You are a helpful assistant."}]
+            for data in self.history:
+                messages.extend([{"role": "user", "content": data[0]}, {"role": "assistant", "content": data[1]}])
+            messages.append({"role": "user", "content": prompt})
+
+            # Configure the OpenAI model parameters
+            payload = {
+                "model": "gpt-3.5-turbo",
+                "messages": messages,
+                "temperature": self.temperature,
+                "top_p": 1,
+                "n": 1,
+                "stream": False,
+                "presence_penalty": 0,
+                "frequency_penalty": 0
             }
 
-            for i in self.history:
-                h_input = i[0]
-                h_reply = i[1]
-                request_body['messages'].append({
-                    "sender_type": "USER",
-                    "text": h_input
-                })
-                request_body['messages'].append({"sender_type": "BOT", "text": h_reply})
-
-            request_body['messages'].append({"sender_type": "USER", "text": prompt})
-            resp = requests.post(url, headers=headers, json=request_body)
-            response = resp.json()['reply']
-            # Add this round's AI reply to messages
-            request_body['messages'].append({"sender_type": "BOT", "text": response})
-            self.history.append((prompt, response))
+            response = requests.post(OPENAI_URL, headers=headers, json=payload)
+            result = response.json()['choices'][0]['message']['content']
+
+            # Add this round's AI reply to the history
+            self.history.append((prompt, result))
 
         else:
 
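For reference, a minimal standalone sketch of the request flow this commit wires into ChatLLM._call. The openai_api_key environment variable, the gpt-3.5-turbo model, the system prompt, and the (prompt, reply) history tuples follow the diff; the function name chat_once and the default temperature are illustrative assumptions, not part of the repository.

import os
from typing import List, Tuple

import requests

OPENAI_URL = "https://api.openai.com/v1/chat/completions"

def chat_once(prompt: str, history: List[Tuple[str, str]], temperature: float = 0.1) -> str:
    """Send one multi-turn request to the OpenAI chat completions endpoint and record the reply."""
    api_key = os.getenv('openai_api_key')  # same environment variable name as in the diff
    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json"
    }

    # Replay past (question, answer) pairs so the model sees the whole conversation
    messages = [{"role": "system", "content": "You are a helpful assistant."}]
    for user_turn, bot_turn in history:
        messages.append({"role": "user", "content": user_turn})
        messages.append({"role": "assistant", "content": bot_turn})
    messages.append({"role": "user", "content": prompt})

    payload = {
        "model": "gpt-3.5-turbo",
        "messages": messages,
        "temperature": temperature,
        "stream": False
    }
    resp = requests.post(OPENAI_URL, headers=headers, json=payload)
    resp.raise_for_status()
    reply = resp.json()['choices'][0]['message']['content']

    history.append((prompt, reply))
    return reply

As in the diff, the history list grows without bound; a production version would likely trim old turns so the request stays within max_token.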