Update app.py
app.py CHANGED

@@ -16,7 +16,7 @@ client = OpenAI(api_key=OPENAI_API_KEY)
 
 def gpt_call(history, user_message,
              model="gpt-4o",
-             max_tokens=
+             max_tokens=3000,  # Increased to 3000 to prevent truncation
              temperature=0.7,
              top_p=0.95):
     """
@@ -37,19 +37,28 @@ def gpt_call(history, user_message,
     # 3) Add the user's new message
     messages.append({"role": "user", "content": user_message})
 
-    # 4) Call OpenAI API
-    # (12 further removed lines not captured in this view)
+    # 4) Call OpenAI API (with continuation handling)
+    full_response = ""
+    while True:
+        completion = client.chat.completions.create(
+            model=model,
+            messages=messages,
+            max_tokens=max_tokens,  # Increased to allow longer responses
+            temperature=temperature,
+            top_p=top_p
+        )
+
+        response_part = completion.choices[0].message.content.strip()
+        full_response += " " + response_part
+
+        # If the response looks incomplete, force the AI to continue
+        if len(response_part) < max_tokens - 50:  # Ensures near full completion
+            break  # Stop loop if response is complete
+
+        # Add last response back into conversation history to continue it
+        messages.append({"role": "assistant", "content": response_part})
+
+    return full_response.strip()
 
 
 def respond(user_message, history):
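
Note (not part of the commit): the continuation check in the added code compares the character length of the reply against max_tokens, which counts tokens, so a finished reply longer than roughly max_tokens characters would still trigger another API call. A minimal sketch of the same continuation loop keyed on the finish_reason reported by the openai v1 Python client is shown below; the helper name call_with_continuation and the "Please continue." follow-up prompt are illustrative assumptions, while the client, model, and parameter names mirror the diff above.

# Sketch only: continuation loop driven by finish_reason == "length"
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

def call_with_continuation(messages, model="gpt-4o", max_tokens=3000,
                           temperature=0.7, top_p=0.95):
    parts = []
    while True:
        completion = client.chat.completions.create(
            model=model,
            messages=messages,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
        )
        choice = completion.choices[0]
        parts.append(choice.message.content)
        # "length" means the reply was cut off by max_tokens;
        # anything else (normally "stop") means the model finished on its own.
        if choice.finish_reason != "length":
            break
        # Feed the partial answer back and ask the model to keep going.
        messages.append({"role": "assistant", "content": choice.message.content})
        messages.append({"role": "user", "content": "Please continue."})
    return "".join(parts).strip()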