Shreyas094 committed
Commit 6f35b05 • 1 Parent(s): a45453a
Update app.py
app.py CHANGED
@@ -471,7 +471,7 @@ def get_response_from_excel(query, model, context, num_calls=3, temperature=0.2)
 
     logging.info("Finished generating response for Excel data")
 
-def truncate_context(context, max_chars=
+def truncate_context(context, max_chars=20000):
     """Truncate context to a maximum number of characters."""
     if len(context) <= max_chars:
        return context
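Only the head of truncate_context is visible in this hunk (the old max_chars value is cut off in the capture). A minimal sketch of how the full helper plausibly continues, assuming it simply slices the string; the truncation marker is illustrative, not taken from the commit:

def truncate_context(context, max_chars=20000):
    """Truncate context to a maximum number of characters."""
    if len(context) <= max_chars:
        return context
    # Assumed continuation: keep the first max_chars characters and
    # flag the cut so downstream prompts see that the context is partial.
    return context[:max_chars] + "\n[... context truncated ...]"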
@@ -537,7 +537,7 @@ def get_response_from_llama(query, model, selected_docs, file_type, num_calls=1,
     # Generate content with streaming enabled
     for response in client.chat_completion(
         messages=messages,  # Pass messages in the required format
-        max_tokens=
+        max_tokens=3000,  # Reduced to ensure we stay within token limits
         temperature=temperature,
         stream=True,
         top_p=0.9,
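This hunk shows only the call site, not how the stream is consumed. A self-contained sketch of the presumed consuming loop, assuming client is a huggingface_hub InferenceClient; the model id and prompt below are placeholders, not from app.py:

from huggingface_hub import InferenceClient

client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")  # hypothetical model id
messages = [{"role": "user", "content": "Summarize the uploaded document."}]  # placeholder prompt

full_response = ""
for chunk in client.chat_completion(
    messages=messages,  # same message format as the call above
    max_tokens=3000,    # the cap introduced by this commit
    temperature=0.2,
    stream=True,
    top_p=0.9,
):
    # With stream=True, each chunk carries an incremental delta of the reply.
    token = chunk.choices[0].delta.content
    if token:
        full_response += token
print(full_response)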