Update app.py
app.py CHANGED

@@ -228,7 +228,7 @@ def chatbot(policy_name_dd,contract_text,progress=gr.Progress()):
         out_file.write(trimmed_input)
     """
     gpt_model = "gpt-4-1106-preview"
-    response = openai.
+    response = openai.chat.completions.create(
         model=gpt_model,
         temperature=0,
         messages=[
@@ -243,18 +243,18 @@ def chatbot(policy_name_dd,contract_text,progress=gr.Progress()):
         ]
     )
 
-    gpt_response = response.choices[0].message
+    gpt_response = response.choices[0].message.content
 
     tokens_used = response.usage
-    if gpt_model=="gpt-4":
-        question_cost = (tokens_used.
-        prompt_tokens = tokens_used.
-        completion_tokens = tokens_used.
-
+    if gpt_model == "gpt-4":
+        question_cost = (tokens_used.total_tokens / 1000) * .03
+        prompt_tokens = tokens_used.prompt_tokens
+        completion_tokens = tokens_used.completion_tokens
+
     else:
-        prompt_tokens = tokens_used.
-        completion_tokens = tokens_used.
-        question_cost = ((prompt_tokens / 1000
+        prompt_tokens = tokens_used.prompt_tokens
+        completion_tokens = tokens_used.completion_tokens
+        question_cost = ((prompt_tokens / 1000) * .01) + ((completion_tokens / 1000) * .03)
 
     """
     with open('response.txt', 'w', encoding='utf-8') as out_file:
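For context, the change moves to the openai-python v1 call pattern. A minimal, self-contained sketch of that pattern follows; the API-key setup and the example messages are illustrative and not taken from app.py, while the model name and per-1K-token rates are the ones in the diff.

import openai

openai.api_key = "sk-..."          # assumption: the real app configures its key elsewhere
gpt_model = "gpt-4-1106-preview"   # model name used in the diff

# v1-style chat completion call via the module-level client
response = openai.chat.completions.create(
    model=gpt_model,
    temperature=0,
    messages=[
        {"role": "system", "content": "You review contracts against a policy."},  # illustrative
        {"role": "user", "content": "Does this contract meet the policy?"},        # illustrative
    ],
)

# v1 responses are typed objects, so message text and usage are attributes
gpt_response = response.choices[0].message.content
tokens_used = response.usage

if gpt_model == "gpt-4":
    question_cost = (tokens_used.total_tokens / 1000) * .03
else:
    # rates used in the diff: $0.01 per 1K prompt tokens, $0.03 per 1K completion tokens
    question_cost = ((tokens_used.prompt_tokens / 1000) * .01) + (
        (tokens_used.completion_tokens / 1000) * .03
    )

print(gpt_response, question_cost)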