NavyDevilDoc committed on
Commit
63221b9
·
verified ·
1 Parent(s): f076cab

Update src/app.py

Browse files
Files changed (1) hide show
  1. src/app.py +129 -116
src/app.py CHANGED
@@ -12,6 +12,7 @@ from openai import OpenAI
12
  from datetime import datetime
13
  from test_integration import run_tests
14
  from core.QuizEngine import QuizEngine
 
15
 
16
  # --- CONFIGURATION ---
17
  st.set_page_config(page_title="Navy AI Toolkit", page_icon="βš“", layout="wide")
@@ -26,12 +27,16 @@ if "roles" not in st.session_state:
26
  if "quiz_state" not in st.session_state:
27
  st.session_state.quiz_state = {
28
  "active": False, # Is a question currently displayed?
29
- "question_data": None, # The current acronym object
30
  "user_answer": "", # What the user typed
31
  "feedback": None, # The LLM's grading response
32
- "streak": 0 # Fun gamification metric
 
33
  }
34
 
 
 
 
35
  # --- FLATTENER LOGIC (Integrated) ---
36
  class OutlineProcessor:
37
  """Parses text outlines for the Flattener tool."""
@@ -183,6 +188,35 @@ with st.sidebar:
183
  )
184
 
185
  st.divider()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
186
 
187
  # Model Selector
188
  st.header("🧠 Intelligence")
@@ -230,18 +264,12 @@ with st.sidebar:
230
 
231
  if st.button("Run Integration Test"):
232
  with st.spinner("Running diagnostics..."):
233
- # Create a buffer to capture the text that would normally be printed
234
  f = io.StringIO()
235
-
236
- # Redirect 'print' statements to our buffer instead of the console
237
  try:
238
  with contextlib.redirect_stdout(f):
239
  run_tests()
240
-
241
- # Display the result in a code block for easy reading
242
  st.success("Tests Completed")
243
  st.code(f.getvalue(), language="text")
244
-
245
  except Exception as e:
246
  st.error(f"Test Execution Failed: {e}")
247
 
@@ -269,31 +297,32 @@ with tab1:
269
 
270
  # RAG Search
271
  context_txt = ""
272
- # 1. Default System Prompt (No RAG)
273
  sys_p = "You are a helpful AI assistant."
274
 
275
  if use_rag:
276
- with st.spinner("Searching Knowledge Base..."):
277
- docs = rag_engine.search_knowledge_base(prompt, st.session_state.username)
278
- if docs:
279
- # 2. Strict System Prompt (With RAG)
280
- # We relax the strictness slightly to allow for inference,
281
- # while still demanding evidence.
282
- sys_p = (
283
- "You are a Navy Document Analyst. "
284
- "You must answer the user's question based PRIMARILY on the provided Context. "
285
- "If the Context contains the answer, output it clearly. "
286
- "If the Context does NOT contain the answer, simply state: "
287
- "'I cannot find that specific information in the documents provided.'"
288
  )
 
 
 
 
 
 
 
 
 
 
 
289
 
290
- # 3. XML-Formatted Context Construction
291
- # This helps the model "see" the start and end of each chunk clearly.
292
- for i, d in enumerate(docs):
293
- src = d.metadata.get('source', 'Unknown')
294
- context_txt += f"<document index='{i+1}' source='{src}'>\n{d.page_content}\n</document>\n"
295
-
296
- # 4. Construct Final User Payload
297
  if context_txt:
298
  final_prompt = (
299
  f"User Question: {prompt}\n\n"
@@ -306,7 +335,6 @@ with tab1:
306
  # Generation
307
  with st.chat_message("assistant"):
308
  with st.spinner("Thinking..."):
309
- # Memory Window
310
  hist = [{"role":"system", "content":sys_p}] + st.session_state.messages[-6:-1] + [{"role":"user", "content":final_prompt}]
311
 
312
  resp, usage = query_model_universal(hist, 2000, model_choice, st.session_state.get("user_openai_key"))
@@ -337,59 +365,57 @@ with tab2:
337
 
338
  if uploaded_file:
339
  # Save temp
340
- temp_path = rag_engine.save_uploaded_file(uploaded_file)
341
 
342
  # ACTION BAR
343
  col_a, col_b, col_c = st.columns(3)
344
 
345
- # 1. ADD TO DB (With Strategy Selection)
346
  with col_a:
347
  chunk_strategy = st.selectbox(
348
  "Chunking Strategy",
349
- ["paragraph", "token"], # Removed 'page' as it is not implemented in new engine yet
350
  help="Paragraph: Standard. Token: Dense text.",
351
  key="chunk_selector"
352
  )
353
 
354
  if st.button("πŸ“₯ Add to Knowledge Base", type="primary"):
355
- with st.spinner("Ingesting..."):
356
- # Note: New engine uses internal Tesseract OCR, not GPT-4o Vision
357
- # so we don't pass vision flags or keys here anymore.
358
-
359
- ok, msg = rag_engine.ingest_file(
360
- file_path=temp_path,
361
- username=st.session_state.username,
362
- strategy=chunk_strategy
363
- )
364
-
365
- if ok:
366
- tracker.upload_user_db(st.session_state.username) # Auto-Sync
367
- st.success(msg)
368
- else:
369
- st.error(msg)
 
 
370
 
371
  # 2. SUMMARIZE
372
  with col_b:
373
- # Spacer to align buttons visually since col_a has a selectbox
374
  st.write("")
375
  st.write("")
376
  if st.button("πŸ“ Summarize Document"):
377
  with st.spinner("Reading & Summarizing..."):
378
  key = st.session_state.get("user_openai_key") or OPENAI_KEY
379
- # Extract raw text first
380
  class FileObj:
381
  def __init__(self, p, n): self.path=p; self.name=n
382
  def read(self):
383
  with open(self.path, "rb") as f: return f.read()
384
 
385
- # Extraction
386
  raw = doc_loader.extract_text_from_file(
387
  FileObj(temp_path, uploaded_file.name),
388
  use_vision=use_vision, api_key=key
389
  )
390
 
391
- # Call LLM
392
- prompt = f"Summarize this document into a key executive brief:\n\n{raw[:20000]}" # Truncate for safety
393
  msgs = [{"role":"user", "content": prompt}]
394
  summ, usage = query_model_universal(msgs, 1000, model_choice, st.session_state.get("user_openai_key"))
395
 
@@ -402,11 +428,9 @@ with tab2:
402
 
403
  # 3. FLATTEN
404
  with col_c:
405
- # Spacer to align buttons
406
  st.write("")
407
  st.write("")
408
 
409
- # We use a session state variable to store the result so it persists for the "Index" step
410
  if "flattened_result" not in st.session_state:
411
  st.session_state.flattened_result = None
412
 
@@ -414,7 +438,6 @@ with tab2:
414
  with st.spinner("Flattening..."):
415
  key = st.session_state.get("user_openai_key") or OPENAI_KEY
416
 
417
- # A. Extract
418
  with open(temp_path, "rb") as f:
419
  class Wrapper:
420
  def __init__(self, data, n): self.data=data; self.name=n
@@ -423,11 +446,9 @@ with tab2:
423
  Wrapper(f.read(), uploaded_file.name), use_vision=use_vision, api_key=key
424
  )
425
 
426
- # B. Parse
427
  proc = OutlineProcessor(raw)
428
  items = proc.parse()
429
 
430
- # C. Flatten
431
  out_txt = []
432
  bar = st.progress(0)
433
  for i, item in enumerate(items):
@@ -437,35 +458,57 @@ with tab2:
437
  out_txt.append(res)
438
  bar.progress((i+1)/len(items))
439
 
440
- # D. Store Result in Session State
441
  final_flattened_text = "\n".join(out_txt)
442
  st.session_state.flattened_result = {
443
  "text": final_flattened_text,
444
  "source": f"{uploaded_file.name}_flat"
445
  }
446
- st.rerun() # Refresh to show the new result/buttons
447
 
448
- # Display Result & Index Option
449
  if st.session_state.flattened_result:
450
  res = st.session_state.flattened_result
451
  st.success("Flattening Complete!")
452
  st.text_area("Result", res["text"], height=200)
453
 
454
- # The New Button
455
  if st.button("πŸ“₯ Index This Flattened Version"):
456
- with st.spinner("Indexing Flattened Text..."):
457
- ok, msg = rag_engine.process_and_add_text(
458
- res["text"],
459
- res["source"],
460
- st.session_state.username
461
- )
462
- if ok:
463
- tracker.upload_user_db(st.session_state.username) # Sync!
464
- st.success(msg)
465
- else:
466
- st.error(msg)
 
 
 
 
 
467
 
468
  st.divider()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
469
 
470
  # === TAB 3: QUIZ MODE ===
471
  with tab3:
@@ -491,7 +534,7 @@ with tab3:
491
 
492
  st.divider()
493
 
494
- # 2. START BUTTON (Logic branches based on mode)
495
  if not qs["active"]:
496
  if st.button("πŸš€ Generate New Question", type="primary"):
497
 
@@ -510,7 +553,6 @@ with tab3:
510
 
511
  # MODE B: DOCUMENTS
512
  else:
513
- # Retry logic for the LLM's "SKIP" response
514
  valid_question_found = False
515
  attempts = 0
516
 
@@ -525,7 +567,6 @@ with tab3:
525
  300, model_choice, st.session_state.get("user_openai_key")
526
  )
527
 
528
- # If LLM liked the chunk, it gave us a question. If not, it said "SKIP".
529
  if "SKIP" not in question_text and len(question_text) > 10:
530
  valid_question_found = True
531
  qs["active"] = True
@@ -541,7 +582,6 @@ with tab3:
541
  if qs["active"]:
542
  st.markdown(f"### {qs['generated_question_text']}")
543
 
544
- # Hints for Doc Mode
545
  if "document" in qs.get("question_data", {}).get("type", ""):
546
  st.caption(f"Source: *{qs['question_data']['source_file']}*")
547
 
@@ -553,7 +593,6 @@ with tab3:
553
  with st.spinner("Grading..."):
554
  data = qs["question_data"]
555
 
556
- # BRANCH GRADING LOGIC
557
  if data["type"] == "acronym":
558
  prompt = quiz.construct_acronym_grading_prompt(
559
  data["term"], data["correct_definition"], user_ans
@@ -563,7 +602,6 @@ with tab3:
563
  qs["generated_question_text"], user_ans, data["context_text"]
564
  )
565
 
566
- # Get Grade
567
  msgs = [{"role": "user", "content": prompt}]
568
  grade, _ = query_model_universal(
569
  msgs, 500, model_choice, st.session_state.get("user_openai_key")
@@ -571,7 +609,6 @@ with tab3:
571
 
572
  qs["feedback"] = grade
573
 
574
- # Streak Logic
575
  if "GRADE:** PASS" in grade or "GRADE:** Pass" in grade:
576
  qs["streak"] += 1
577
  elif "GRADE:** FAIL" in grade:
@@ -579,53 +616,29 @@ with tab3:
579
 
580
  st.rerun()
581
 
582
- # 4. FEEDBACK AREA
583
  if qs["feedback"]:
 
584
  if "PASS" in qs["feedback"]:
585
  st.success("βœ… CORRECT")
586
  else:
587
- st.warning("⚠️ NEEDS IMPROVEMENT")
 
 
 
588
 
589
  st.markdown(qs["feedback"])
590
 
591
- # For documents, show the source text so you can learn
592
- if qs["question_data"]["type"] == "document":
 
 
 
593
  with st.expander("Show Source Text (Answer Key)"):
594
- st.info(qs["question_data"]["context_text"])
595
 
596
  if st.button("Next Question ➑️"):
597
  qs["active"] = False
598
  qs["question_data"] = None
599
  qs["feedback"] = None
600
- st.rerun()
601
-
602
- # 4. FEEDBACK DISPLAY
603
- if qs["feedback"]:
604
- st.divider()
605
- if "PASS" in qs["feedback"]:
606
- st.success("βœ… CORRECT")
607
- else:
608
- st.error("❌ INCORRECT")
609
-
610
- st.markdown(qs["feedback"])
611
- st.info(f"**Official Definition:** {qs['question_data']['correct_definition']}")
612
-
613
- if st.button("Next Question ➑️"):
614
- qs["active"] = False
615
- qs["question_data"] = None
616
- qs["feedback"] = None
617
- st.rerun()
618
-
619
- # DB MANAGER
620
- st.subheader("Database Management")
621
- docs = rag_engine.list_documents(st.session_state.username)
622
- if docs:
623
- for d in docs:
624
- c1, c2 = st.columns([4,1])
625
- c1.text(f"πŸ“„ {d['filename']} ({d['chunks']} chunks)")
626
- if c2.button("πŸ—‘οΈ", key=d['source']):
627
- rag_engine.delete_document(st.session_state.username, d['source'])
628
- tracker.upload_user_db(st.session_state.username)
629
- st.rerun()
630
- else:
631
- st.info("Database Empty.")
 
12
  from datetime import datetime
13
  from test_integration import run_tests
14
  from core.QuizEngine import QuizEngine
15
+ from core.PineconeManager import PineconeManager # FIXED: Added missing import
16
 
17
  # --- CONFIGURATION ---
18
  st.set_page_config(page_title="Navy AI Toolkit", page_icon="βš“", layout="wide")
 
27
  if "quiz_state" not in st.session_state:
28
  st.session_state.quiz_state = {
29
  "active": False, # Is a question currently displayed?
30
+ "question_data": None, # The current acronym/doc object
31
  "user_answer": "", # What the user typed
32
  "feedback": None, # The LLM's grading response
33
+ "streak": 0, # Fun gamification metric
34
+ "generated_question_text": ""
35
  }
36
 
37
+ if "active_index" not in st.session_state:
38
+ st.session_state.active_index = None
39
+
40
  # --- FLATTENER LOGIC (Integrated) ---
41
  class OutlineProcessor:
42
  """Parses text outlines for the Flattener tool."""
 
188
  )
189
 
190
  st.divider()
191
+
192
+ st.header("🌲 Pinecone Settings")
193
+ # Initialize Manager
194
+ pc_key = os.getenv("PINECONE_API_KEY")
195
+ if pc_key:
196
+ pm = PineconeManager(pc_key)
197
+ indexes = pm.list_indexes()
198
+
199
+ # 1. INDEX SELECTOR
200
+ selected_index = st.selectbox("Active Index", indexes)
201
+ st.session_state.active_index = selected_index
202
+
203
+ # 2. SAFETY CHECK VISUAL
204
+ if selected_index:
205
+ is_compatible = pm.check_dimension_compatibility(selected_index, 384)
206
+ if is_compatible:
207
+ st.caption("βœ… Dimensions Match (384)")
208
+ else:
209
+ st.error("❌ Dimension Mismatch! Do not use.")
210
+
211
+ # 3. CREATE NEW INDEX
212
+ with st.expander("Create New Index"):
213
+ new_idx_name = st.text_input("Index Name")
214
+ if st.button("Create"):
215
+ ok, msg = pm.create_index(new_idx_name)
216
+ if ok: st.success(msg); st.rerun()
217
+ else: st.error(msg)
218
+ else:
219
+ st.warning("No Pinecone Key Found")
220
 
221
  # Model Selector
222
  st.header("🧠 Intelligence")
 
264
 
265
  if st.button("Run Integration Test"):
266
  with st.spinner("Running diagnostics..."):
 
267
  f = io.StringIO()
 
 
268
  try:
269
  with contextlib.redirect_stdout(f):
270
  run_tests()
 
 
271
  st.success("Tests Completed")
272
  st.code(f.getvalue(), language="text")
 
273
  except Exception as e:
274
  st.error(f"Test Execution Failed: {e}")
275
 
 
297
 
298
  # RAG Search
299
  context_txt = ""
 
300
  sys_p = "You are a helpful AI assistant."
301
 
302
  if use_rag:
303
+ if not st.session_state.active_index:
304
+ st.error("⚠️ Please select an Active Index in the sidebar first.")
305
+ else:
306
+ with st.spinner("Searching Knowledge Base..."):
307
+ # FIXED: Added index_name parameter
308
+ docs = rag_engine.search_knowledge_base(
309
+ query=prompt,
310
+ username=st.session_state.username,
311
+ index_name=st.session_state.active_index
 
 
 
312
  )
313
+ if docs:
314
+ sys_p = (
315
+ "You are a Navy Document Analyst. "
316
+ "You must answer the user's question based PRIMARILY on the provided Context. "
317
+ "If the Context contains the answer, output it clearly. "
318
+ "If the Context does NOT contain the answer, simply state: "
319
+ "'I cannot find that specific information in the documents provided.'"
320
+ )
321
+ for i, d in enumerate(docs):
322
+ src = d.metadata.get('source', 'Unknown')
323
+ context_txt += f"<document index='{i+1}' source='{src}'>\n{d.page_content}\n</document>\n"
324
 
325
+ # Construct Payload
 
 
 
 
 
 
326
  if context_txt:
327
  final_prompt = (
328
  f"User Question: {prompt}\n\n"
 
335
  # Generation
336
  with st.chat_message("assistant"):
337
  with st.spinner("Thinking..."):
 
338
  hist = [{"role":"system", "content":sys_p}] + st.session_state.messages[-6:-1] + [{"role":"user", "content":final_prompt}]
339
 
340
  resp, usage = query_model_universal(hist, 2000, model_choice, st.session_state.get("user_openai_key"))
 
365
 
366
  if uploaded_file:
367
  # Save temp
368
+ temp_path = rag_engine.save_uploaded_file(uploaded_file, st.session_state.username)
369
 
370
  # ACTION BAR
371
  col_a, col_b, col_c = st.columns(3)
372
 
373
+ # 1. ADD TO DB
374
  with col_a:
375
  chunk_strategy = st.selectbox(
376
  "Chunking Strategy",
377
+ ["paragraph", "token"],
378
  help="Paragraph: Standard. Token: Dense text.",
379
  key="chunk_selector"
380
  )
381
 
382
  if st.button("πŸ“₯ Add to Knowledge Base", type="primary"):
383
+ if not st.session_state.active_index:
384
+ st.error("Please select an Active Index in the sidebar.")
385
+ else:
386
+ with st.spinner("Ingesting..."):
387
+ # FIXED: Added index_name parameter
388
+ ok, msg = rag_engine.ingest_file(
389
+ file_path=temp_path,
390
+ username=st.session_state.username,
391
+ index_name=st.session_state.active_index,
392
+ strategy=chunk_strategy
393
+ )
394
+
395
+ if ok:
396
+ tracker.upload_user_db(st.session_state.username) # Auto-Sync
397
+ st.success(msg)
398
+ else:
399
+ st.error(msg)
400
 
401
  # 2. SUMMARIZE
402
  with col_b:
 
403
  st.write("")
404
  st.write("")
405
  if st.button("πŸ“ Summarize Document"):
406
  with st.spinner("Reading & Summarizing..."):
407
  key = st.session_state.get("user_openai_key") or OPENAI_KEY
 
408
  class FileObj:
409
  def __init__(self, p, n): self.path=p; self.name=n
410
  def read(self):
411
  with open(self.path, "rb") as f: return f.read()
412
 
 
413
  raw = doc_loader.extract_text_from_file(
414
  FileObj(temp_path, uploaded_file.name),
415
  use_vision=use_vision, api_key=key
416
  )
417
 
418
+ prompt = f"Summarize this document into a key executive brief:\n\n{raw[:20000]}"
 
419
  msgs = [{"role":"user", "content": prompt}]
420
  summ, usage = query_model_universal(msgs, 1000, model_choice, st.session_state.get("user_openai_key"))
421
 
 
428
 
429
  # 3. FLATTEN
430
  with col_c:
 
431
  st.write("")
432
  st.write("")
433
 
 
434
  if "flattened_result" not in st.session_state:
435
  st.session_state.flattened_result = None
436
 
 
438
  with st.spinner("Flattening..."):
439
  key = st.session_state.get("user_openai_key") or OPENAI_KEY
440
 
 
441
  with open(temp_path, "rb") as f:
442
  class Wrapper:
443
  def __init__(self, data, n): self.data=data; self.name=n
 
446
  Wrapper(f.read(), uploaded_file.name), use_vision=use_vision, api_key=key
447
  )
448
 
 
449
  proc = OutlineProcessor(raw)
450
  items = proc.parse()
451
 
 
452
  out_txt = []
453
  bar = st.progress(0)
454
  for i, item in enumerate(items):
 
458
  out_txt.append(res)
459
  bar.progress((i+1)/len(items))
460
 
 
461
  final_flattened_text = "\n".join(out_txt)
462
  st.session_state.flattened_result = {
463
  "text": final_flattened_text,
464
  "source": f"{uploaded_file.name}_flat"
465
  }
466
+ st.rerun()
467
 
 
468
  if st.session_state.flattened_result:
469
  res = st.session_state.flattened_result
470
  st.success("Flattening Complete!")
471
  st.text_area("Result", res["text"], height=200)
472
 
 
473
  if st.button("πŸ“₯ Index This Flattened Version"):
474
+ if not st.session_state.active_index:
475
+ st.error("Please select an Active Index in the sidebar.")
476
+ else:
477
+ with st.spinner("Indexing Flattened Text..."):
478
+ # FIXED: Added index_name parameter
479
+ ok, msg = rag_engine.process_and_add_text(
480
+ text=res["text"],
481
+ source_name=res["source"],
482
+ username=st.session_state.username,
483
+ index_name=st.session_state.active_index
484
+ )
485
+ if ok:
486
+ tracker.upload_user_db(st.session_state.username)
487
+ st.success(msg)
488
+ else:
489
+ st.error(msg)
490
 
491
  st.divider()
492
+
493
+ # DB MANAGER
494
+ st.subheader("Database Management")
495
+ # This reads from local cache so no index needed
496
+ docs = rag_engine.list_documents(st.session_state.username)
497
+
498
+ if docs:
499
+ for d in docs:
500
+ c1, c2 = st.columns([4,1])
501
+ c1.text(f"πŸ“„ {d['filename']} (Cached)")
502
+ if c2.button("πŸ—‘οΈ", key=d['source']):
503
+ if not st.session_state.active_index:
504
+ st.error("Select Index first.")
505
+ else:
506
+ # FIXED: Added index_name parameter
507
+ rag_engine.delete_document(st.session_state.username, d['source'], st.session_state.active_index)
508
+ tracker.upload_user_db(st.session_state.username)
509
+ st.rerun()
510
+ else:
511
+ st.info("Database Empty (No cached files found).")
512
 
513
  # === TAB 3: QUIZ MODE ===
514
  with tab3:
 
534
 
535
  st.divider()
536
 
537
+ # 2. START BUTTON
538
  if not qs["active"]:
539
  if st.button("πŸš€ Generate New Question", type="primary"):
540
 
 
553
 
554
  # MODE B: DOCUMENTS
555
  else:
 
556
  valid_question_found = False
557
  attempts = 0
558
 
 
567
  300, model_choice, st.session_state.get("user_openai_key")
568
  )
569
 
 
570
  if "SKIP" not in question_text and len(question_text) > 10:
571
  valid_question_found = True
572
  qs["active"] = True
 
582
  if qs["active"]:
583
  st.markdown(f"### {qs['generated_question_text']}")
584
 
 
585
  if "document" in qs.get("question_data", {}).get("type", ""):
586
  st.caption(f"Source: *{qs['question_data']['source_file']}*")
587
 
 
593
  with st.spinner("Grading..."):
594
  data = qs["question_data"]
595
 
 
596
  if data["type"] == "acronym":
597
  prompt = quiz.construct_acronym_grading_prompt(
598
  data["term"], data["correct_definition"], user_ans
 
602
  qs["generated_question_text"], user_ans, data["context_text"]
603
  )
604
 
 
605
  msgs = [{"role": "user", "content": prompt}]
606
  grade, _ = query_model_universal(
607
  msgs, 500, model_choice, st.session_state.get("user_openai_key")
 
609
 
610
  qs["feedback"] = grade
611
 
 
612
  if "GRADE:** PASS" in grade or "GRADE:** Pass" in grade:
613
  qs["streak"] += 1
614
  elif "GRADE:** FAIL" in grade:
 
616
 
617
  st.rerun()
618
 
619
+ # 4. FEEDBACK AREA (MERGED & FIXED)
620
  if qs["feedback"]:
621
+ st.divider()
622
  if "PASS" in qs["feedback"]:
623
  st.success("βœ… CORRECT")
624
  else:
625
+ if "FAIL" in qs["feedback"]:
626
+ st.error("❌ INCORRECT")
627
+ else:
628
+ st.warning("⚠️ PARTIAL / COMMENTARY")
629
 
630
  st.markdown(qs["feedback"])
631
 
632
+ # Display Correct Answer based on type
633
+ data = qs["question_data"]
634
+ if data["type"] == "acronym":
635
+ st.info(f"**Official Definition:** {data['correct_definition']}")
636
+ elif data["type"] == "document":
637
  with st.expander("Show Source Text (Answer Key)"):
638
+ st.info(data["context_text"])
639
 
640
  if st.button("Next Question ➑️"):
641
  qs["active"] = False
642
  qs["question_data"] = None
643
  qs["feedback"] = None
644
+ st.rerun()