Update app.py
app.py CHANGED
@@ -110,7 +110,7 @@ def call_llm(query):
             "content": [
                 {
                     "type": "text",
-                    "text": f"schema: {schema} \n\n You are an explaining the query and and more importantly the results to a non technical business person. Considering the schema above, think step by step and return explanation of the result and the query.\n\n query: {query} \n\n result : {result} ",
+                    "text": f"schema: {schema} \n\n You are an explaining the query and and more importantly the results to a non technical business person, step by step in 2-3 lines. Considering the schema above, think step by step and return explanation of the result and the query.\n\n query: {query} \n\n result : {result} ",
                 }
             ],
         }
@@ -120,7 +120,7 @@ def call_llm(query):
         model="gpt-4.1-nano",
         messages=messages,
         response_format=Explanation,
-        max_tokens=
+        max_tokens=250,
         temperature=0.1
     )
     res = response.choices[0].message.parsed
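For context, the changed lines sit inside a structured-output call to the OpenAI chat completions API: response_format=Explanation together with response.choices[0].message.parsed points to the SDK's parse helper with a Pydantic model. The sketch below is a minimal reconstruction of how the surrounding code likely fits together, assuming an Explanation Pydantic model and a module-level client; passing schema and result in as parameters is an illustration choice, not taken from the commit.

# Minimal sketch of the assumed context around the diffed lines, not the actual app.py.
from openai import OpenAI
from pydantic import BaseModel

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment


class Explanation(BaseModel):
    # Assumed shape of the structured response; the real model may differ.
    explanation: str


def call_llm(query: str, schema: str, result: str) -> Explanation:
    # schema/result are parameters here to keep the example self-contained;
    # in the Space they may come from surrounding state instead.
    messages = [
        {
            "role": "user",
            "content": [
                {
                    "type": "text",
                    # Prompt text mirrors the "+" line of the diff verbatim.
                    "text": f"schema: {schema} \n\n You are an explaining the query and and more importantly the results to a non technical business person, step by step in 2-3 lines. Considering the schema above, think step by step and return explanation of the result and the query.\n\n query: {query} \n\n result : {result} ",
                }
            ],
        }
    ]
    # parse() validates the model's reply against the Explanation schema.
    response = client.beta.chat.completions.parse(
        model="gpt-4.1-nano",
        messages=messages,
        response_format=Explanation,
        max_tokens=250,  # cap added in this commit
        temperature=0.1,
    )
    return response.choices[0].message.parsed

Capping max_tokens at 250 and adding the "step by step in 2-3 lines" instruction to the prompt both push the model toward shorter, business-readable explanations.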