awacke1 committed on
Commit f019b7a • 1 Parent(s): 053774d

Update app.py

Files changed (1):
  1. app.py +203 -149
app.py CHANGED
@@ -53,7 +53,14 @@ st.set_page_config(
     }
 )
 
-
+client = OpenAI(api_key=os.getenv('OPENAI_API_KEY'), organization=os.getenv('OPENAI_ORG_ID'))
+MODEL = "gpt-4o-2024-05-13"
+if "openai_model" not in st.session_state:
+    st.session_state["openai_model"] = MODEL
+if "messages" not in st.session_state:
+    st.session_state.messages = []
+if st.button("Clear Session"):
+    st.session_state.messages = []
 
 # HTML5 based Speech Synthesis (Text to Speech in Browser)
 @st.cache_resource
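
Note: the block added above hoists the OpenAI client and chat history to module scope. Streamlit reruns the whole script on every widget interaction, so any state that must survive a rerun, such as the running message list, has to live in st.session_state. A minimal standalone sketch of that pattern (illustrative, not part of this commit):

    import os
    import streamlit as st
    from openai import OpenAI

    client = OpenAI(api_key=os.getenv('OPENAI_API_KEY'))

    # st.session_state survives the rerun Streamlit performs after each interaction
    if "messages" not in st.session_state:
        st.session_state.messages = []

    if st.button("Clear Session"):
        st.session_state.messages = []

    # replay the transcript so the chat persists visually across reruns
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])
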
@@ -359,6 +366,19 @@ def display_glossary_grid(roleplaying_glossary):
         st.markdown(f"**{term}** <small>{links_md}</small>", unsafe_allow_html=True)
 
 
+# ChatBot client chat completions ------------------------- !!
+def process_text2(MODEL='gpt-4o-2024-05-13', text_input='What is 2+2 and what is an imaginary number'):
+    if text_input:
+        completion = client.chat.completions.create(
+            model=MODEL,
+            messages=st.session_state.messages
+        )
+        return_text = completion.choices[0].message.content
+        st.write("Assistant: " + return_text)
+        filename = generate_filename(text_input, "md")
+        create_file(filename, text_input, return_text, should_save)
+        return return_text
+
 @st.cache_resource
 def get_table_download_link(file_path):
 
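
Note: process_text2 sends the accumulated st.session_state.messages to the model; its text_input parameter is only used to derive the saved filename. Callers are therefore expected to append the user turn before calling it, as the ChatBot entry point near the bottom of the file does (sketch only; the prompt variable is illustrative):

    # append the user turn first; process_text2 sends the whole history
    st.session_state.messages.append({"role": "user", "content": prompt})
    response = process_text2(text_input=prompt)
    st.session_state.messages.append({"role": "assistant", "content": response})
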
@@ -576,10 +596,26 @@ def FileSidebar():
 
     if next_action=='md':
         st.markdown(file_contents)
+        SpeechSynthesis(file_contents)
+
         buttonlabel = '🔍Run'
         if st.button(key='Runmd', label=buttonlabel):
-            user_prompt = file_contents
-            #try:
+            MODEL = "gpt-4o-2024-05-13"
+            openai.api_key = os.getenv('OPENAI_API_KEY')
+            openai.organization = os.getenv('OPENAI_ORG_ID')
+            client = OpenAI(api_key=os.getenv('OPENAI_API_KEY'), organization=os.getenv('OPENAI_ORG_ID'))
+            st.session_state.messages.append({"role": "user", "content": file_contents})
+            with st.chat_message("user"):
+                st.markdown(file_contents)
+            with st.chat_message("assistant"):
+                completion = client.chat.completions.create(
+                    model=MODEL,
+                    messages=st.session_state.messages,
+                    stream=True
+                )
+                response = st.write_stream(completion)
+            st.session_state.messages.append({"role": "assistant", "content": response})
+            #try:
            #search_glossary(file_contents)
            #except:
            #st.markdown('GPT is sleeping. Restart ETA 30 seconds.')
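
Note: with stream=True, client.chat.completions.create returns an iterator of chunks rather than a finished completion, so the response has to be rendered incrementally. st.write_stream consumes such a stream, paints it token by token, and returns the accumulated text. On Streamlit versions without st.write_stream, a manual loop over the chunks is equivalent (illustrative sketch, not part of the commit):

    import streamlit as st

    def stream_to_markdown(completion) -> str:
        # accumulate streamed deltas and repaint a single placeholder,
        # which is what st.write_stream does under the hood
        placeholder = st.empty()
        full_response = ""
        for chunk in completion:
            delta = chunk.choices[0].delta.content
            if delta:
                full_response += delta
                placeholder.markdown(full_response + "▌")
        placeholder.markdown(full_response)
        return full_response
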
@@ -1241,79 +1277,6 @@ def get_audio_download_link(file_path):
 
 
 
-# 🎡 Wav Audio files - Transcription History in Wav
-audio_files = glob.glob("*.wav")
-audio_files = [file for file in audio_files if len(os.path.splitext(file)[0]) >= 10]  # exclude files with short names
-audio_files.sort(key=lambda x: (os.path.splitext(x)[1], x), reverse=True)  # sort by file type and file name in descending order
-
-# 🖼 PNG Image files
-image_files = glob.glob("*.png")
-image_files = [file for file in image_files if len(os.path.splitext(file)[0]) >= 10]  # exclude files with short names
-image_files.sort(key=lambda x: (os.path.splitext(x)[1], x), reverse=True)  # sort by file type and file name in descending order
-
-# 🎥 MP4 Video files
-video_files = glob.glob("*.mp4")
-video_files = [file for file in video_files if len(os.path.splitext(file)[0]) >= 10]  # exclude files with short names
-video_files.sort(key=lambda x: (os.path.splitext(x)[1], x), reverse=True)  # sort by file type and file name in descending order
-
-# Delete All button for each file type
-if st.sidebar.button("🗑 Delete All Audio"):
-    for file in audio_files:
-        os.remove(file)
-    st.rerun()
-
-if st.sidebar.button("🗑 Delete All Images"):
-    for file in image_files:
-        os.remove(file)
-    st.rerun()
-
-if st.sidebar.button("🗑 Delete All Videos"):
-    for file in video_files:
-        os.remove(file)
-    st.rerun()
-
-# Display and handle audio files
-for file in audio_files:
-    col1, col2 = st.sidebar.columns([6, 1])  # adjust the ratio as needed
-    with col1:
-        st.markdown(file)
-        if st.button("🎡", key="play_" + file):  # play emoji button
-            audio_file = open(file, 'rb')
-            audio_bytes = audio_file.read()
-            st.audio(audio_bytes, format='audio/wav')
-    with col2:
-        if st.button("🗑", key="delete_" + file):
-            os.remove(file)
-            st.rerun()
-
-# Display and handle image files
-for file in image_files:
-    col1, col2 = st.sidebar.columns([6, 1])  # adjust the ratio as needed
-    with col1:
-        st.markdown(file)
-        if st.button("🖼", key="show_" + file):  # show emoji button
-            image = open(file, 'rb').read()
-            st.image(image)
-    with col2:
-        if st.button("🗑", key="delete_" + file):
-            os.remove(file)
-            st.rerun()
-
-# Display and handle video files
-for file in video_files:
-    col1, col2 = st.sidebar.columns([6, 1])  # adjust the ratio as needed
-    with col1:
-        st.markdown(file)
-        if st.button("🎥", key="play_" + file):  # play emoji button
-            video_file = open(file, 'rb')
-            video_bytes = video_file.read()
-            st.video(video_bytes)
-    with col2:
-        if st.button("🗑", key="delete_" + file):
-            os.remove(file)
-            st.rerun()
-
-
 
 GiveFeedback=False
 if GiveFeedback:
@@ -1380,18 +1343,6 @@ def transcribe_canary(filename):
     st.write(result)
     return result
 
-# ChatBot client chat completions ------------------------- !!
-def process_text2(MODEL='gpt-4o-2024-05-13', text_input='What is 2+2 and what is an imaginary number'):
-    if text_input:
-        completion = client.chat.completions.create(
-            model=MODEL,
-            messages=st.session_state.messages
-        )
-        return_text = completion.choices[0].message.content
-        st.write("Assistant: " + return_text)
-        filename = generate_filename(text_input, "md")
-        create_file(filename, text_input, return_text, should_save)
-        return return_text
 
 # Transcript to arxiv and client chat completion ------------------------- !!
 filename = save_and_play_audio(audio_recorder)
@@ -1441,12 +1392,12 @@ if example_input:
     for example_input in session_state["search_queries"]:
         st.write(example_input)
 
-    if st.button("Run Prompt", help="Click to run."):
-        try:
-            response=StreamLLMChatResponse(example_input)
-            create_file(filename, example_input, response, should_save)
-        except:
-            st.write('model is asleep. Starting now on A10 GPU. Please wait one minute then retry. KEDA triggered.')
+    #if st.button("Run Prompt", help="Click to run."):
+    #    try:
+    #        response=StreamLLMChatResponse(example_input)
+    #        create_file(filename, example_input, response, should_save)
+    #    except:
+    #        st.write('model is asleep. Starting now on A10 GPU. Please wait one minute then retry. KEDA triggered.')
 
 openai.api_key = os.getenv('OPENAI_API_KEY')
 if openai.api_key == None: openai.api_key = st.secrets['OPENAI_API_KEY']
@@ -1490,7 +1441,7 @@ if AddAFileForContext:
     st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
 
 
-# documentation
+# GPT4o documentation
 # 1. Cookbook: https://cookbook.openai.com/examples/gpt4o/introduction_to_gpt4o
 # 2. Configure your Project and Orgs to limit/allow Models: https://platform.openai.com/settings/organization/general
 # 3. Watch your Billing! https://platform.openai.com/settings/organization/billing/overview
@@ -1531,11 +1482,6 @@ def process_text(text_input):
 
     #st.write("Assistant: " + completion.choices[0].message.content)
 
-
-
-
-
-
 def create_file(filename, prompt, response, is_image=False):
     with open(filename, "w", encoding="utf-8") as f:
         f.write(prompt + "\n\n" + response)
@@ -1607,12 +1553,13 @@ def process_image(image_input, user_prompt):
 
     return image_response
 
-def save_imageold(image_input, filename_txt):
-    # Save the uploaded video file
-    with open(filename_txt, "wb") as f:
-        f.write(image_input.getbuffer())
-    return image_input.name
-
+def create_audio_file(filename, audio_data, should_save):
+    if should_save:
+        with open(filename, "wb") as file:
+            file.write(audio_data.getvalue())
+        st.success(f"Audio file saved as {filename}")
+    else:
+        st.warning("Audio file not saved.")
 
 def process_audio(audio_input, text_input):
     if audio_input:
@@ -1620,15 +1567,20 @@ def process_audio(audio_input, text_input):
             model="whisper-1",
             file=audio_input,
         )
-        response = client.chat.completions.create(
-            model=MODEL,
-            messages=[
-                {"role": "system", "content":{text_input}},
-                {"role": "user", "content": [{"type": "text", "text": f"The audio transcription is: {transcription.text}"}],}
-            ],
-            temperature=0,
-        )
-        st.markdown(response.choices[0].message.content)
+        st.session_state.messages.append({"role": "user", "content": transcription.text})
+        with st.chat_message("assistant"):
+            st.markdown(transcription.text)
+
+        SpeechSynthesis(transcription.text)
+        filename = generate_filename(transcription.text, "wav")
+
+        create_audio_file(filename, audio_input, should_save)
+
+        #SpeechSynthesis(transcription.text)
+
+        filename = generate_filename(transcription.text, "md")
+        create_file(filename, transcription.text, transcription.text, should_save)
+        #st.markdown(response.choices[0].message.content)
 
 def process_audio_for_video(video_input):
     if video_input:
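
Note: process_audio now stops at transcription plus archiving; the follow-up chat completion (whose system message was a buggy set literal, {text_input}) was removed. For reference, the Whisper call shape used here as a self-contained sketch (the file path is illustrative):

    import os
    from openai import OpenAI

    client = OpenAI(api_key=os.getenv('OPENAI_API_KEY'))

    # whisper-1 accepts an open binary file handle or a Streamlit UploadedFile
    with open("recording.wav", "rb") as audio_file:  # illustrative path
        transcription = client.audio.transcriptions.create(
            model="whisper-1",
            file=audio_file,
        )
    print(transcription.text)
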
@@ -1733,55 +1685,134 @@ def main():
         image_input = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
         image_response = process_image(image_input, text_input)
 
-
-
     elif option == "Audio":
-        text = "Transcribe and answer questions as a helpful audio music and speech assistant. "
-        #text = "You are generating a transcript summary. Create a summary of the provided transcription. Respond in Markdown."
+        text = "You are generating a transcript summary. Create a summary of the provided transcription. Respond in Markdown."
+        text_input = st.text_input(label="Enter text prompt to use with Audio context.", value=text)
+        uploaded_files = st.file_uploader("Upload an audio file", type=["mp3", "wav"], accept_multiple_files=True)
+
+        for audio_input in uploaded_files:
+            st.write(audio_input.name)
+            if audio_input is not None:
+                process_audio(audio_input, text_input)
+
+    elif option == "Audio old":
+        #text = "Transcribe and answer questions as a helpful audio music and speech assistant. "
+        text = "You are generating a transcript summary. Create a summary of the provided transcription. Respond in Markdown."
         text_input = st.text_input(label="Enter text prompt to use with Audio context.", value=text)
 
-        audio_input = st.file_uploader("Upload an audio file", type=["mp3", "wav"])
-        audio_response = process_audio(audio_input, text_input)
+        uploaded_files = st.file_uploader("Upload an audio file", type=["mp3", "wav"], accept_multiple_files=True)
+        for audio_input in uploaded_files:
+            st.write(audio_input.name)
+
+            if audio_input is not None:
+                # To read file as bytes:
+                bytes_data = audio_input.getvalue()
+                #st.write(bytes_data)
+
+                # To convert to a string based IO:
+                #stringio = StringIO(audio_input.getvalue().decode("utf-8"))
+                #st.write(stringio)
+
+                # To read file as string:
+                #string_data = stringio.read()
+                #st.write(string_data)
+
+                process_audio(audio_input, text_input)
 
     elif option == "Video":
         video_input = st.file_uploader("Upload a video file", type=["mp4"])
         process_audio_and_video(video_input)
 
-    # Image and Video Galleries
-    num_columns_images=st.slider(key="num_columns_images", label="Choose Number of Image Columns", min_value=1, max_value=15, value=5)
-    display_images_and_wikipedia_summaries(num_columns_images)  # Image Jump Grid
 
-    num_columns_video=st.slider(key="num_columns_video", label="Choose Number of Video Columns", min_value=1, max_value=15, value=5)
-    display_videos_and_links(num_columns_video)  # Video Jump Grid
+# Enter the GPT-4o omni model in streamlit chatbot
+current_messages=[]
+for message in st.session_state.messages:
+    with st.chat_message(message["role"]):
+        current_messages.append(message)
+        st.markdown(message["content"])
 
 
-    # Optional UI's
-    showExtendedTextInterface=False
-    if showExtendedTextInterface:
-        display_glossary_grid(roleplaying_glossary)  # Word Glossary Jump Grid - Dynamically calculates columns based on details length to keep topic together
-        num_columns_text=st.slider(key="num_columns_text", label="Choose Number of Text Columns", min_value=1, max_value=15, value=4)
-        display_buttons_with_scores(num_columns_text)  # Feedback Jump Grid
-        st.markdown(personality_factors)
 
+# 🎡 Wav Audio files - Transcription History in Wav
+audio_files = glob.glob("*.wav")
+audio_files = [file for file in audio_files if len(os.path.splitext(file)[0]) >= 10]  # exclude files with short names
+audio_files.sort(key=lambda x: (os.path.splitext(x)[1], x), reverse=True)  # sort by file type and file name in descending order
+
+# 🖼 PNG Image files
+image_files = glob.glob("*.png")
+image_files = [file for file in image_files if len(os.path.splitext(file)[0]) >= 10]  # exclude files with short names
+image_files.sort(key=lambda x: (os.path.splitext(x)[1], x), reverse=True)  # sort by file type and file name in descending order
+
+# 🎥 MP4 Video files
+video_files = glob.glob("*.mp4")
+video_files = [file for file in video_files if len(os.path.splitext(file)[0]) >= 10]  # exclude files with short names
+video_files.sort(key=lambda x: (os.path.splitext(x)[1], x), reverse=True)  # sort by file type and file name in descending order
 
 
 
-# st.title("GPT-4o ChatBot")
 
-client = OpenAI(api_key=os.getenv('OPENAI_API_KEY'), organization=os.getenv('OPENAI_ORG_ID'))
-MODEL = "gpt-4o-2024-05-13"
-if "openai_model" not in st.session_state:
-    st.session_state["openai_model"] = MODEL
-if "messages" not in st.session_state:
-    st.session_state.messages = []
-if st.button("Clear Session"):
-    st.session_state.messages = []
+main()
+
+# Delete All button for each file type
+if st.sidebar.button("🗑 Delete All Audio"):
+    for file in audio_files:
+        os.remove(file)
+    st.rerun()
+
+if st.sidebar.button("🗑 Delete All Images"):
+    for file in image_files:
+        os.remove(file)
+    st.rerun()
+
+if st.sidebar.button("🗑 Delete All Videos"):
+    for file in video_files:
+        os.remove(file)
+    st.rerun()
+
+# Display and handle audio files
+for file in audio_files:
+    col1, col2 = st.sidebar.columns([6, 1])  # adjust the ratio as needed
+    with col1:
+        st.markdown(file)
+        if st.button("🎡", key="play_" + file):  # play emoji button
+            audio_file = open(file, 'rb')
+            audio_bytes = audio_file.read()
+            st.audio(audio_bytes, format='audio/wav')
+    with col2:
+        if st.button("🗑", key="delete_" + file):
+            os.remove(file)
+            st.rerun()
+
+# Display and handle image files
+for file in image_files:
+    col1, col2 = st.sidebar.columns([6, 1])  # adjust the ratio as needed
+    with col1:
+        st.markdown(file)
+        if st.button("🖼", key="show_" + file):  # show emoji button
+            image = open(file, 'rb').read()
+            st.image(image)
+    with col2:
+        if st.button("🗑", key="delete_" + file):
+            os.remove(file)
+            st.rerun()
+
+# Display and handle video files
+for file in video_files:
+    col1, col2 = st.sidebar.columns([6, 1])  # adjust the ratio as needed
+    with col1:
+        st.markdown(file)
+        if st.button("🎥", key="play_" + file):  # play emoji button
+            video_file = open(file, 'rb')
+            video_bytes = video_file.read()
+            st.video(video_bytes)
+    with col2:
+        if st.button("🗑", key="delete_" + file):
+            os.remove(file)
+            st.rerun()
+
+
+
 
 # ChatBot Entry
 if prompt := st.chat_input("GPT-4o Multimodal ChatBot - What can I help you with?"):
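
Note: in the sidebar gallery loops added above, every st.button key is derived from the filename ("play_" + file, "delete_" + file). Streamlit widget keys must be unique within a run, so deriving them from the loop variable is what lets each file carry its own play and delete buttons, and st.rerun() after os.remove forces the glob lists to be rebuilt without the deleted file. A trimmed sketch of the pattern (illustrative):

    import os
    import glob
    import streamlit as st

    for file in glob.glob("*.wav"):
        col1, col2 = st.sidebar.columns([6, 1])
        with col1:
            st.markdown(file)
            if st.button("🎡", key="play_" + file):  # unique key per file
                st.audio(open(file, 'rb').read(), format='audio/wav')
        with col2:
            if st.button("🗑", key="delete_" + file):
                os.remove(file)
                st.rerun()  # rebuild the file list without the deleted file
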
@@ -1797,5 +1828,28 @@ if prompt := st.chat_input("GPT-4o Multimodal ChatBot - What can I help you with
         response = process_text2(text_input=prompt)
         st.session_state.messages.append({"role": "assistant", "content": response})
 
-if __name__ == "__main__":
-    main()
+
+
+
+
+# Image and Video Galleries
+num_columns_images=st.slider(key="num_columns_images", label="Choose Number of Image Columns", min_value=1, max_value=15, value=3)
+display_images_and_wikipedia_summaries(num_columns_images)  # Image Jump Grid
+
+num_columns_video=st.slider(key="num_columns_video", label="Choose Number of Video Columns", min_value=1, max_value=15, value=3)
+display_videos_and_links(num_columns_video)  # Video Jump Grid
+
+
+# Optional UI's
+showExtendedTextInterface=False
+if showExtendedTextInterface:
+    display_glossary_grid(roleplaying_glossary)  # Word Glossary Jump Grid - Dynamically calculates columns based on details length to keep topic together
+    num_columns_text=st.slider(key="num_columns_text", label="Choose Number of Text Columns", min_value=1, max_value=15, value=4)
+    display_buttons_with_scores(num_columns_text)  # Feedback Jump Grid
+    st.markdown(personality_factors)
+
+
+
+
+#if __name__ == "__main__":
+