AhmadMustafa committed
Commit 764338a · 1 Parent(s): fc85021

update: 4o mini -> 4o

Files changed (1):
  1. app.py +6 -6
app.py CHANGED
@@ -131,7 +131,7 @@ class TranscriptProcessor:
         client = OpenAI()
 
         completion = client.chat.completions.create(
-            model="gpt-4o-mini",
+            model="gpt-4o",
             messages=[
                 {"role": "system", "content": "You are a helpful assistant."},
                 {"role": "user", "content": prompt},
@@ -295,7 +295,7 @@ Total takes: 2
     - [Take 1. <div id='topic' style="display: inline"> 20s at 25:45]({link_start}://{{origin}}/collab/{{cid}}/{{rsid}}?st={{1245}}&et={{1265}}&uid={{uid}}))
     - [Take 3 (Best). <div id='topic' style="display: inline"> 5s at 10:13 </div>]({link_start}://roll.ai/colab/1234aq_12314/51234151?st=613&et=618&uid=82314)"""
         completion = client.chat.completions.create(
-            model="gpt-4o-mini",
+            model="gpt-4o",
             messages=[
                 {
                     "role": "system",
@@ -364,7 +364,7 @@ Format requirements:
         print(user_prompt)
 
         completion = client.chat.completions.create(
-            model="gpt-4o-mini",
+            model="gpt-4o",
             messages=[
                 {"role": "system", "content": system_prompt},
                 {"role": "user", "content": user_prompt},
@@ -491,7 +491,7 @@ If the user provides the correct call type, use the correct_call_type function t
         messages.append({"role": "user", "content": message})
 
         completion = client.chat.completions.create(
-            model="gpt-4o-mini",
+            model="gpt-4o",
             messages=messages,
             tools=tools,
             stream=True,
@@ -505,7 +505,7 @@ If the user provides the correct call type, use the correct_call_type function t
                 tool_calls_detected = True
                 # Handle tool calls without streaming
                 response = client.chat.completions.create(
-                    model="gpt-4o-mini",
+                    model="gpt-4o",
                     messages=messages,
                     tools=tools,
                 )
@@ -534,7 +534,7 @@ If the user provides the correct call type, use the correct_call_type function t
 
                 # Get final response after tool call
                 final_response = client.chat.completions.create(
-                    model="gpt-4o-mini", messages=messages, stream=True
+                    model="gpt-4o", messages=messages, stream=True
                 )
 
                 # Stream the final response
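
For context, a minimal sketch of the call pattern this commit touches, using the OpenAI Python SDK v1 client already imported in app.py. The MODEL constant is a hypothetical refactor, not part of this commit; it would let a future model swap (like this gpt-4o-mini -> gpt-4o change) be a one-line edit instead of six separate string replacements.

# Hypothetical refactor (not in this commit): define the model name once.
from openai import OpenAI

MODEL = "gpt-4o"  # assumed constant replacing the six string literals in app.py

client = OpenAI()  # reads OPENAI_API_KEY from the environment

completion = client.chat.completions.create(
    model=MODEL,
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello"},
    ],
)
print(completion.choices[0].message.content)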