alibicer committed on
Commit
843757b
·
verified ·
1 Parent(s): 81e7d3c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -14
app.py CHANGED
@@ -16,7 +16,7 @@ client = OpenAI(api_key=OPENAI_API_KEY)
16
 
17
  def gpt_call(history, user_message,
18
  model="gpt-4o",
19
- max_tokens=1500, # Increased from 512 to 1500 to prevent truncation
20
  temperature=0.7,
21
  top_p=0.95):
22
  """
@@ -37,19 +37,28 @@ def gpt_call(history, user_message,
37
  # 3) Add the user's new message
38
  messages.append({"role": "user", "content": user_message})
39
 
40
- # 4) Call OpenAI API
41
- completion = client.chat.completions.create(
42
- model=model,
43
- messages=messages,
44
- max_tokens=max_tokens, # Increased to allow longer responses
45
- temperature=temperature,
46
- top_p=top_p
47
- )
48
-
49
- # 5) Ensure full response is returned without being cut off
50
- full_response = "".join(choice.message.content for choice in completion.choices).strip()
51
-
52
- return full_response
 
 
 
 
 
 
 
 
 
53
 
54
 
55
  def respond(user_message, history):
 
16
 
17
  def gpt_call(history, user_message,
18
  model="gpt-4o",
19
+ max_tokens=3000, # Increased to 3000 to prevent truncation
20
  temperature=0.7,
21
  top_p=0.95):
22
  """
 
37
  # 3) Add the user's new message
38
  messages.append({"role": "user", "content": user_message})
39
 
40
+ # 4) Call OpenAI API (with continuation handling)
41
+ full_response = ""
42
+ while True:
43
+ completion = client.chat.completions.create(
44
+ model=model,
45
+ messages=messages,
46
+ max_tokens=max_tokens, # Increased to allow longer responses
47
+ temperature=temperature,
48
+ top_p=top_p
49
+ )
50
+
51
+ response_part = completion.choices[0].message.content.strip()
52
+ full_response += " " + response_part
53
+
54
+ # If the response looks incomplete, force the AI to continue
55
+ if len(response_part) < max_tokens - 50: # Ensures near full completion
56
+ break # Stop loop if response is complete
57
+
58
+ # Add last response back into conversation history to continue it
59
+ messages.append({"role": "assistant", "content": response_part})
60
+
61
+ return full_response.strip()
62
 
63
 
64
  def respond(user_message, history):