Spaces:
Running
on
CPU Upgrade
Running
on
CPU Upgrade
Update app.py
Browse files
app.py
CHANGED
@@ -1339,8 +1339,9 @@ openai.api_key = os.getenv('OPENAI_API_KEY')
|
|
1339 |
if openai.api_key == None: openai.api_key = st.secrets['OPENAI_API_KEY']
|
1340 |
menu = ["txt", "htm", "xlsx", "csv", "md", "py"]
|
1341 |
choice = st.sidebar.selectbox("Output File Type:", menu)
|
1342 |
-
|
1343 |
-
|
|
|
1344 |
|
1345 |
|
1346 |
collength, colupload = st.columns([2,3]) # adjust the ratio as needed
|
@@ -1353,11 +1354,14 @@ document_responses = {}
|
|
1353 |
if uploaded_file is not None:
|
1354 |
file_content = read_file_content(uploaded_file, max_length)
|
1355 |
document_sections.extend(divide_document(file_content, max_length))
|
|
|
|
|
1356 |
if len(document_sections) > 0:
|
1357 |
if st.button("🗂️ View Upload"):
|
1358 |
st.markdown("**Sections of the uploaded file:**")
|
1359 |
for i, section in enumerate(list(document_sections)):
|
1360 |
st.markdown(f"**Section {i+1}**\n{section}")
|
|
|
1361 |
st.markdown("**Chat with the model:**")
|
1362 |
for i, section in enumerate(list(document_sections)):
|
1363 |
if i in document_responses:
|
@@ -1365,7 +1369,6 @@ if len(document_sections) > 0:
|
|
1365 |
else:
|
1366 |
if st.button(f"Chat about Section {i+1}"):
|
1367 |
st.write('Reasoning with your inputs...')
|
1368 |
-
#response = chat_with_model(user_prompt, section, model_choice)
|
1369 |
st.write('Response:')
|
1370 |
st.write(response)
|
1371 |
document_responses[i] = response
|
@@ -1373,18 +1376,18 @@ if len(document_sections) > 0:
|
|
1373 |
create_file(filename, user_prompt, response, should_save)
|
1374 |
st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
|
1375 |
|
1376 |
-
if st.button('💬 Chat'):
|
1377 |
-
st.write('Reasoning with your inputs...')
|
1378 |
-
user_prompt_sections = divide_prompt(user_prompt, max_length)
|
1379 |
-
full_response = ''
|
1380 |
-
for prompt_section in user_prompt_sections:
|
1381 |
-
response = chat_with_model(prompt_section, ''.join(list(document_sections)), model_choice)
|
1382 |
-
full_response += response + '\n' # Combine the responses
|
1383 |
-
response = full_response
|
1384 |
-
st.write('Response:')
|
1385 |
-
st.write(response)
|
1386 |
-
filename = generate_filename(user_prompt, choice)
|
1387 |
-
create_file(filename, user_prompt, response, should_save)
|
1388 |
|
1389 |
display_glossary_grid(roleplaying_glossary) # Word Glossary Jump Grid
|
1390 |
display_videos_and_links() # Video Jump Grid
|
|
|
1339 |
if openai.api_key == None: openai.api_key = st.secrets['OPENAI_API_KEY']
|
1340 |
menu = ["txt", "htm", "xlsx", "csv", "md", "py"]
|
1341 |
choice = st.sidebar.selectbox("Output File Type:", menu)
|
1342 |
+
|
1343 |
+
#model_choice = st.sidebar.radio("Select Model:", ('gpt-3.5-turbo', 'gpt-3.5-turbo-0301'))
|
1344 |
+
#user_prompt = st.text_area("Enter prompts, instructions & questions:", '', height=100)
|
1345 |
|
1346 |
|
1347 |
collength, colupload = st.columns([2,3]) # adjust the ratio as needed
|
|
|
1354 |
if uploaded_file is not None:
|
1355 |
file_content = read_file_content(uploaded_file, max_length)
|
1356 |
document_sections.extend(divide_document(file_content, max_length))
|
1357 |
+
|
1358 |
+
|
1359 |
if len(document_sections) > 0:
|
1360 |
if st.button("🗂️ View Upload"):
|
1361 |
st.markdown("**Sections of the uploaded file:**")
|
1362 |
for i, section in enumerate(list(document_sections)):
|
1363 |
st.markdown(f"**Section {i+1}**\n{section}")
|
1364 |
+
|
1365 |
st.markdown("**Chat with the model:**")
|
1366 |
for i, section in enumerate(list(document_sections)):
|
1367 |
if i in document_responses:
|
|
|
1369 |
else:
|
1370 |
if st.button(f"Chat about Section {i+1}"):
|
1371 |
st.write('Reasoning with your inputs...')
|
|
|
1372 |
st.write('Response:')
|
1373 |
st.write(response)
|
1374 |
document_responses[i] = response
|
|
|
1376 |
create_file(filename, user_prompt, response, should_save)
|
1377 |
st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
|
1378 |
|
1379 |
+
#if st.button('💬 Chat'):
|
1380 |
+
# st.write('Reasoning with your inputs...')
|
1381 |
+
# user_prompt_sections = divide_prompt(user_prompt, max_length)
|
1382 |
+
# full_response = ''
|
1383 |
+
# for prompt_section in user_prompt_sections:
|
1384 |
+
# response = chat_with_model(prompt_section, ''.join(list(document_sections)), model_choice)
|
1385 |
+
# full_response += response + '\n' # Combine the responses
|
1386 |
+
# response = full_response
|
1387 |
+
# st.write('Response:')
|
1388 |
+
# st.write(response)
|
1389 |
+
# filename = generate_filename(user_prompt, choice)
|
1390 |
+
# create_file(filename, user_prompt, response, should_save)
|
1391 |
|
1392 |
display_glossary_grid(roleplaying_glossary) # Word Glossary Jump Grid
|
1393 |
display_videos_and_links() # Video Jump Grid
|