Update app.py
app.py CHANGED
@@ -177,21 +177,18 @@ def openai_response(PROMPT):
 st.title("Welcome to :red[AlexaPiro]!!🤖")
 st.title("How can I help?")
 
-
-
-('Random Questions', 'Questions based on custom CSV data')
-)
-
+option_ = ['Random Questions','Questions based on custom CSV data']
+Usage = st.selectbox('Select an option:', option_)
 if Usage == 'Questions based on custom CSV data':
     st.text('''Upload any CSV file and ask questions based on the contents of the file.
-
     Example:
     If I have a CSV file having data on cars, I could ask:
-
-
-    The feature returns a SQL query based on your question and if the returned
+    - How many cars were manufactured each year between 2000 to 2008?
+
+    The feature returns a SQL query based on your question and if the returned
+    data has atleast 2 columns then a graph will also be returned.''')
 
-    option = ['
+    option = ['Sample_Cars_csv','Upload_csv']
     res = st.selectbox('Select the Upload_csv option:',option)
     if res == 'Upload_csv':
         uploaded_file = st.file_uploader("Add dataset (csv) ",type=['csv'])
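The description added above is implemented by the second hunk below. As a rough orientation, the text-to-SQL flow it describes boils down to the following sketch. This is not the app's code: gen_sql() stands in for the app's gpt3() helper, the table name "df" and the 256-token limit are assumptions, and only the prompt shape, the model, and the use of pandasql come from the diff.

# Minimal sketch of the text-to-SQL flow described above; illustrative only.
import openai
import pandas as pd
import pandasql as ps

def gen_sql(prompt):
    # Same completion endpoint and engine the app uses elsewhere.
    resp = openai.Completion.create(
        engine="text-davinci-003",
        prompt=prompt,
        max_tokens=256,
        temperature=0.0,
    )
    return resp["choices"][0]["text"].strip()

df = pd.read_csv("cars.csv")  # the sample dataset bundled with the Space
columns = ", ".join(df.columns.tolist())
question = "How many cars were manufactured each year between 2000 to 2008?"

# Prompt layout mirrors the col_p string in the second hunk: instruction, column list, request.
prompt = ("Create SQL statement from instruction. Table df (" + columns + "). "
          "Request: " + question + " SQL statement:")
sql = gen_sql(prompt)
result = ps.sqldf(sql)  # pandasql runs the generated SQL against the local DataFrame df
print(sql)
print(result)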
@@ -268,6 +265,74 @@ if Usage == 'Questions based on custom CSV data':
                 except:
                     pass
 
+    elif res == "Sample_Cars_csv":
+        df = pd.read_csv('cars.csv')
+        col= df.columns
+        try:
+            columns = str((df.columns).tolist())
+            column = clean(columns)
+            st.write('Columns:' )
+            st.text(col)
+        except:
+            pass
+
+        temp = st.slider('Temperature: ', 0.0, 1.0, 0.0)
+
+
+        with st.form(key='columns_in_form2'):
+            col3, col4 = st.columns(2)
+            with col3:
+                userPrompt = st.text_area("Input Prompt",'Enter Natural Language Query')
+                submitButton = st.form_submit_button(label = 'Submit')
+                if submitButton:
+                    try:
+                        col_p ="Create SQL statement from instruction. "+ext+" " " (" + column +")." +" Request:" + userPrompt + "SQL statement:"
+                        result = gpt3(col_p)
+                    except:
+                        results = gpt3(userPrompt)
+                    st.success('loaded')
+            with col4:
+                try:
+                    sqlOutput = st.text_area('SQL Query', value=gpt3(col_p))
+                    warning(sqlOutput)
+                    cars=pd.read_csv('cars.csv')
+                    result_tab2=ps.sqldf(sqlOutput)
+                    st.write(result_tab2)
+                    with open("fewshot_matplot.txt", "r") as file:
+                        text_plot = file.read()
+
+                    result_tab = result_tab2.reset_index(drop=True)
+                    result_tab_string = result_tab.to_string()
+                    gr_prompt = text_plot + userPrompt + result_tab_string + "Plot graph for: "
+
+                    if len(gr_prompt) > 4097:
+                        st.write('OVERWHELMING DATA!!! You have given me more than 4097 tokens! ^_^')
+                        st.write('As of today, the NLP model text-davinci-003 that I run on takes in inputs that have less than 4097 tokens. Kindly retry ^_^')
+
+                    elif len(result_tab2.columns) < 2:
+                        st.write("I need more data to conduct analysis and provide visualizations for you... ^_^")
+
+                    else:
+                        st.success("Plotting...")
+                        response_graph = openai.Completion.create(
+                            engine="text-davinci-003",
+                            prompt = gr_prompt,
+                            max_tokens=1024,
+                            n=1,
+                            stop=None,
+                            temperature=0.5,
+                        )
+
+                        if response_graph['choices'][0]['text'] != "":
+                            print(response_graph['choices'][0]['text'])
+                            exec(response_graph['choices'][0]['text'])
+
+                        else:
+                            print('Retry! Graph could not be plotted *_*')
+
+                except:
+                    pass
+
 
 elif Usage == 'Random Questions':
     st.text('''You can ask me for:
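For the plotting branch added at the end of this hunk, the essential steps are: read few-shot matplotlib examples from fewshot_matplot.txt, append the user's question and the SQL result table, guard against over-long prompts, then execute whatever code the model returns. Below is a condensed, hedged sketch: plot_from_result and its arguments are illustrative names, and only the prompt layout, the 4097-length guard, and the Completion settings come from the diff.

# Sketch of the plotting step added above; illustrative, not the app's code.
import openai
import matplotlib.pyplot as plt  # assumption: the generated snippets plot with matplotlib

def plot_from_result(few_shot, question, table_text):
    # Prompt layout copied from the diff: examples + question + result table + trailing cue.
    gr_prompt = few_shot + question + table_text + "Plot graph for: "

    # The app guards with len(gr_prompt) > 4097, i.e. a character count compared against
    # text-davinci-003's 4,097-token context limit; it is a rough check, not a token count.
    if len(gr_prompt) > 4097:
        print("Prompt too long for text-davinci-003; reduce the result set.")
        return

    resp = openai.Completion.create(
        engine="text-davinci-003",
        prompt=gr_prompt,
        max_tokens=1024,
        n=1,
        stop=None,
        temperature=0.5,
    )
    code = resp["choices"][0]["text"]
    if code.strip():
        # The app exec()s the returned code directly; treat it as untrusted input
        # and review or sandbox it anywhere beyond a demo.
        exec(code)
    else:
        print("Retry! Graph could not be plotted *_*")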