robertselvam committed
Commit 84d9bdc
Parent(s): 657c6ff
Update app.py
app.py
CHANGED
@@ -167,24 +167,14 @@ class KeyValueExtractor:
 
             # Call OpenAI GPT-3.5-turbo
             chat_completion = self.client.chat.completions.create(
-                model = "
+                model = "ChatGPT",
                 messages = conversation,
                 max_tokens=1000,
                 temperature=0
             )
             response = chat_completion.choices[0].message.content
             return response
-
-            # response = openai.Completion.create(
-            #     engine="text-davinci-003", # You can choose a different engine as well
-            #     temperature = 0,
-            #     prompt=f"Get maximum count meaningfull key value pairs. content in backticks.```{content}```.",
-            #     max_tokens=1000 # You can adjust the length of the response
-            # )
-
-            # # Extract and return the chatbot's reply
-            # result = response['choices'][0]['text'].strip()
-            # return result
+
         except Exception as e:
             # If an error occurs during the key-value extraction process, log the error
             logging.error(f"Error while extracting key-value pairs: {e}")
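For reference, a minimal standalone sketch of the same chat-completions request is shown below, assuming the openai>=1.0 Python SDK (consistent with the self.client.chat.completions.create call in the hunk) and a hypothetical prompt. The function name, the conversation messages, and the "gpt-3.5-turbo" model id are illustrative assumptions and not part of the commit, which passes model = "ChatGPT".

import logging

from openai import OpenAI  # assumed: openai>=1.0 SDK, same API surface as self.client in app.py

client = OpenAI()  # reads OPENAI_API_KEY from the environment

def extract_key_value_pairs(content: str):
    # Hypothetical conversation; app.py builds its own `conversation` elsewhere.
    conversation = [
        {"role": "system", "content": "Extract meaningful key-value pairs from the text."},
        {"role": "user", "content": f"```{content}```"},
    ]
    try:
        chat_completion = client.chat.completions.create(
            # The commit sets model = "ChatGPT"; "gpt-3.5-turbo" is assumed here because it
            # matches the "Call OpenAI GPT-3.5-turbo" comment in the diff and "ChatGPT" is
            # not a published model identifier.
            model="gpt-3.5-turbo",
            messages=conversation,
            max_tokens=1000,
            temperature=0,
        )
        return chat_completion.choices[0].message.content
    except Exception as e:
        # Same failure handling as app.py: log the error and return nothing.
        logging.error(f"Error while extracting key-value pairs: {e}")
        return None

If the Space targets a different model, only the model argument needs to change; the rest of the call is identical to the code after this commit.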