lindsay-qu committed on
Commit
e1ce828
1 Parent(s): 3ed86fa

Update models/gpt4_model.py

Browse files
Files changed (1) hide show
  1. models/gpt4_model.py +41 -31
models/gpt4_model.py CHANGED
@@ -1,7 +1,11 @@
1
  from .base_model import BaseModel
2
 
3
  import openai
 
4
  from tqdm import tqdm
 
 
 
5
  class GPT4Model(BaseModel):
6
  def __init__(self,
7
  generation_model="gpt-4-vision-preview",
@@ -12,46 +16,52 @@ class GPT4Model(BaseModel):
12
  self.embedding_model = embedding_model
13
  self.temperature = temperature
14
 
15
- def respond(self, messages: list) -> str:
16
- try:
17
- response = openai.ChatCompletion.create(
18
- messages=messages,
19
- model=self.generation_model,
20
- temperature=self.temperature,
21
- max_tokens=1000,
22
- ).choices[0]['message']['content']
23
- except:
24
- try:
25
- response = openai.ChatCompletion.create(
26
- messages=messages,
27
- model=self.generation_model,
28
- temperature=self.temperature,
29
- max_tokens=1000,
30
- ).choices[0]['message']['content']
31
- except:
32
- try:
33
- response = openai.ChatCompletion.create(
34
- messages=messages,
35
- model=self.generation_model,
36
- temperature=self.temperature,
37
- max_tokens=1000,
38
- ).choices[0]['message']['content']
39
- except:
40
- response = "No answer was provided."
41
  # content = response.choices[0]['message']['content']
42
 
43
  return response
44
 
45
- def embedding(self, texts: list) -> list:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
46
  data = []
47
  # print(f"{self.embedding_model} Embedding:")
48
  for i in range(0, len(texts), 2048):
49
  lower = i
50
  upper = min(i+2048, len(texts))
51
- data += openai.Embedding.create(input=texts[lower:upper],
52
  model=self.embedding_model
53
- )["data"]
54
-
55
- embeddings = [d["embedding"] for d in data]
56
 
57
  return embeddings
 
1
  from .base_model import BaseModel
2
 
3
  import openai
4
+ from openai import AsyncOpenAI, OpenAI
5
  from tqdm import tqdm
6
+ import asyncio
7
+ import os
8
+
9
  class GPT4Model(BaseModel):
10
  def __init__(self,
11
  generation_model="gpt-4-vision-preview",
 
16
  self.embedding_model = embedding_model
17
  self.temperature = temperature
18
 
19
+ async def respond_async(self, messages: list[dict]) -> str:
20
+ client = AsyncOpenAI(
21
+ api_key=os.environ["OPENAI_API_KEY"],
22
+ base_url=os.environ["OPENAI_API_BASE"]
23
+ )
24
+ print("start api call")
25
+ output = await client.chat.completions.create(
26
+ messages=messages,
27
+ model=self.generation_model,
28
+ temperature=self.temperature,
29
+ max_tokens=1000,
30
+ )
31
+ print("end api call")
32
+ response = output.choices[0].message.content
 
 
 
 
 
 
 
 
 
 
 
 
33
  # content = response.choices[0]['message']['content']
34
 
35
  return response
36
 
37
+ def respond(self, messages: list[dict]) -> str:
38
+ client = OpenAI(
39
+ api_key=os.environ["OPENAI_API_KEY"],
40
+ base_url=os.environ["OPENAI_API_BASE"]
41
+ )
42
+ # OpenAI.api_key=os.environ["OPENAI_API_KEY"]
43
+ # OpenAI.api_base=os.environ["OPENAI_API_BASE"]
44
+ response = client.chat.completions.create(
45
+ messages=messages,
46
+ model=self.generation_model,
47
+ temperature=self.temperature,
48
+ max_tokens=1000,
49
+ ).choices[0].message.content
50
+ return response
51
+
52
+ def embedding(self, texts: list[str]) -> list[float]:
53
+ client = OpenAI(
54
+ api_key=os.environ["OPENAI_API_KEY"],
55
+ base_url=os.environ["OPENAI_API_BASE"]
56
+ )
57
  data = []
58
  # print(f"{self.embedding_model} Embedding:")
59
  for i in range(0, len(texts), 2048):
60
  lower = i
61
  upper = min(i+2048, len(texts))
62
+ data += client.embeddings.create(input=texts[lower:upper],
63
  model=self.embedding_model
64
+ ).data
65
+ embeddings = [d.embedding for d in data]
 
66
 
67
  return embeddings