artistypl committed on
Commit
238b484
1 Parent(s): 3aeeedb

Update chatllm.py

Browse files

fix bug: openai param

Files changed (1) hide show
  1. chatllm.py +9 -12
chatllm.py CHANGED
@@ -2,6 +2,7 @@ import os
2
  from typing import Dict, List, Optional, Tuple, Union
3
 
4
  import torch
 
5
  import requests
6
  from langchain.llms.base import LLM
7
  from langchain.llms.utils import enforce_stop_tokens
@@ -87,18 +88,14 @@ class ChatLLM(LLM):
87
  messages.append({"role": "user", "content": prompt})
88
 
89
  # 配置OPENAI模型参数
90
- payload = {
91
- "model": "gpt-3.5-turbo",
92
- "messages": messages,
93
- "temperature" : self.temperature,
94
- "top_p": self.top_p,
95
- "n" : 1,
96
- "stream": False,
97
- "presence_penalty":0,
98
- "frequency_penalty":0
99
- }
100
-
101
- response = requests.post(OPENAI_URL, headers=headers, json=payload)
102
  result = response.choices[0].text
103
 
104
  # 将当次的ai回复内容加入history
 
2
  from typing import Dict, List, Optional, Tuple, Union
3
 
4
  import torch
5
+ import openai
6
  import requests
7
  from langchain.llms.base import LLM
8
  from langchain.llms.utils import enforce_stop_tokens
 
88
  messages.append({"role": "user", "content": prompt})
89
 
90
  # 配置OPENAI模型参数
91
+ response = openai.Completion.create(
92
+ model = 'gpt-3.5-turbo',
93
+ messages = messages,
94
+ temperature = self.temperature,
95
+ top_p = self.top_p,
96
+ presence_penalty = 0,
97
+ frequency_penalty = 0
98
+ )
 
 
 
 
99
  result = response.choices[0].text
100
 
101
  # 将当次的ai回复内容加入history