# =========================================================================
# Imports
# =========================================================================
import os, time, re, sys
import numpy as np
from openai import OpenAI
import streamlit as st
import streamlit.components.v1 as components
import urllib.parse

# =========================================================================
# AI Response Dialog
# =========================================================================
@st.dialog("🤖 Response")
def AI_gen_response(prompt):

    MIN_PROMPT_LENGTH = 6
    if len(prompt.strip()) < MIN_PROMPT_LENGTH:
        print("ERROR: AI prompt too short")
        st.markdown("*Your input is too short, please retype ...*")    # clear output and print statement in italics
        return

    # AI API Call
    with st.spinner("Thinking ...", show_time=True):
        st.markdown("*Thinking about your response ...*")              # clear output and print statement in italics

        full_text = []
        client = st.session_state["openai_client"]
        stream = client.chat.completions.create(
            model    = st.session_state["openai_model"],
            messages = [{"role": "user", "content": prompt}],
                       # {"role": "admin", "content": admin_prompt},
            stream   = True,
        )
        for chunk in stream:                                # chunk is a ChatCompletionChunk
            delta = chunk.choices[0].delta                  # “what changed since the prev. chunk”
            if delta.content:                               # sometimes None (e.g. first chunk)
                print(delta.content, end="", flush=True)    # live printing
                full_text.append(delta.content)             # keep it if you need it

    response = "".join(full_text)                   # the entire response as one string
    format_text(response, input_directory="")       # there is no input directory for the OpenAI API

    return response                                 # return the response for more programming if needed

# =========================================================================
# Symbolic Lookup Table - Greek (lower and upper, math and equality signs)
# =========================================================================
SYMBOLS: dict[str, str] = {
    r"\alpha": "α",    r"\beta": "β",      r"\gamma": "γ",     r"\delta": "δ",    r"\epsilon": "ε",  r"\zeta": "ζ",
    r"\eta": "η",      r"\theta": "θ",     r"\iota": "ι",      r"\kappa": "κ",    r"\lambda": "λ",   r"\mu": "μ",
    r"\nu": "ν",       r"\xi": "ξ",        r"\omicron": "ο",   r"\pi": "π",       r"\rho": "ρ",      r"\sigma": "σ",
    r"\tau": "τ",      r"\upsilon": "υ",   r"\phi": "φ",       r"\chi": "χ",      r"\psi": "ψ",      r"\omega": "ω",
    r"\Alpha": "Α",    r"\Beta": "Β",      r"\Gamma": "Γ",     r"\Delta": "Δ",    r"\Epsilon": "Ε",  r"\Zeta": "Ζ",
    r"\Eta": "Η",      r"\Theta": "Θ",     r"\Iota": "Ι",      r"\Kappa": "Κ",    r"\Lambda": "Λ",   r"\Mu": "Μ",
    r"\Nu": "Ν",       r"\Xi": "Ξ",        r"\Omicron": "Ο",   r"\Pi": "Π",       r"\Rho": "Ρ",      r"\Sigma": "Σ",
    r"\Tau": "Τ",      r"\Upsilon": "Υ",   r"\Phi": "Φ",       r"\Chi": "Χ",      r"\Psi": "Ψ",      r"\Omega": "Ω",
    r"\pm": "±",       r"\times": "×",     r"\div": "÷",       r"\cdot": "·",     r"\sqrt": "√",
    r"\sum": "∑",      r"\prod": "∏",      r"\int": "∫",       r"\infty": "∞",    r"\partial": "∂",  r"\nabla": "∇",
    r"\le": "≤",       r"\ge": "≥",        r"\neq": "≠",       r"\approx": "≈",
    r"\and": "∧",      r"\or": "∨",        r"\neg": "¬",
    r"\in": "∈",       r"\notin": "∉",     r"\subset": "⊂",    r"\subseteq": "⊆", r"\supset": "⊃",   r"\supseteq": "⊇",
    r"\cup": "∪",      r"\cap": "∩",
    r"\angle": "∠",    r"\triangle": "△",  r"\therefore": "∴", r"\because": "∵",  r"\propto": "∝",
    r"\parallel": "∥", r"\perp": "⊥",      r"\degree": "°",    r"\bullet": "•",
    r"\rightarrow": "→", r"\leftarrow": "←", r"\uparrow": "↑", r"\downarrow": "↓", r"\leftrightarrow": "↔",
    r"\ell": "ℓ",      r"\Re": "ℜ",        r"\Im": "ℑ",        r"\aleph": "ℵ",    r"\wp": "℘"
}

# =====================================================
# Functions - to convert symbol tags to symbols
# =====================================================
FORMULA_REGEX  = re.compile(r'\\[A-Za-z]+')
BOLD_TXT_REGEX = re.compile(r'\\[A-Za-z]+{([^{}]*)}')

def test_load():
    # print("load_pages - testing external call")    # DEBUG
    pass

def latex_to_unicode(text: str, table: dict[str, str] = SYMBOLS) -> str:
    return FORMULA_REGEX.sub(lambda m: table.get(m.group(0), m.group(0)), text)

def latex_bold_to_markdown(text: str) -> str:
    return BOLD_TXT_REGEX.sub(r'**\1**', text)
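# -------------------------------------------------------------------------
# Illustrative sketch (added for clarity, not part of the original page flow):
# quick sanity checks for the two converters above.  The example strings are
# invented; only the helper and table names come from this module.
# -------------------------------------------------------------------------
def _demo_symbol_helpers():
    # symbol tags are mapped through SYMBOLS; unknown commands are left unchanged
    assert latex_to_unicode(r"\alpha \le \beta") == "α ≤ β"
    # any \command{...} wrapper becomes markdown bold
    assert latex_bold_to_markdown(r"\textbf{mean} value") == "**mean** value"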
# =====================================================================================
# format_text - loads a text file, formats it and places streamlit components
#               based on instructions from the text file (lesson/learning module) file
# =====================================================================================
def format_text(input_text, input_directory=""):
    # --------------------------------------------------------------------------------
    # Uses text file as input - input_text
    # Within the text are instructional tags used to build the content for the page
    # This code parses the text line-by-line to determine what to display for the page
    # --------------------------------------------------------------------------------

    # Parse the raw text for certain backslashes for mathematical formulae and
    # instruct the program to call the st.latex function to render
    input_text = input_text.replace("\\[\n",   'st.latex("')
    input_text = input_text.replace("\\[",     'st.latex("')
    input_text = input_text.replace("\n\\]\n", '")\n')
    input_text = input_text.replace("\\]\n",   '")\n')

    # Split the text line by line to parse
    lines = input_text.split("\n")
    idx = 0

    # Scan each line
    while idx < len(lines):
        line = lines[idx]

        # Placeholder for tag-handling branches whose conditions and content are missing
        # from this copy of the file (they executed the st.latex(" lines created above and
        # embedded HTML via components.html(..., height=600, width=700, scrolling=True)).
        if False:
            pass

        # ---------------------------------------------------------------------
        # A Cognitive Checkpoint is a point in the module where the student is asked to
        # write a briefing that demonstrates whether they understand a concept
        # The briefing is sent to the AI for evaluation
        # Constructing this as a multi-line input gives the ability to enhance the admin prompt
        # which will need custom instructions based on the checkpoint
        # ---------------------------------------------------------------------
        elif line[:len("Cognitive Checkpoint:")] == "Cognitive Checkpoint:":
            question_key = line[23:31]      # Unique key for response
            question = ""                   # Multi-line question for cognitive response
            for c, question_line in enumerate(lines[idx+1:]):
                if question_line[:len("End Cognitive Checkpoint:")] == "End Cognitive Checkpoint:":
                    idx += 1                # skip this line
                    break
                else:
                    question += question_line + "\n"
            idx += c                        # Set global counter to skip past the lines already read in lookahead mode

            st.write(question)              # Write question

            # Provide area to write response
            with st.expander(label="Cognitive Checkpoint:", expanded=True, icon=":material/live_help:"):
                if answer_text := st.text_area(label="Answer to Question", value="", height=150, key=question_key):
                    # := is the "Walrus Operator" - assigns, checks and runs the function
                    admin_prompt = '''
                    You are a teacher.  Evaluate the following statement delimited in square brackets for accuracy from your student.
                    For each logical statement, grade it as one of three options: correct, misunderstanding, incorrect.
                    For each statement cite the grade, student comment, your response, corrected response if required.
                    Provide a summary at the end, not in table form, with the grade and a summary of the student comment
                    '''
                    # Return your assessment in JSON format        # Optional - for eval purposes

                    AI_question = admin_prompt + "[" + answer_text + "]"
                    try:
                        AI_gen_response(prompt=AI_question)
                    except Exception as e:
                        print("ERROR: Dialog box open. Message suppressed\n", str(e))

        # ------------------------------------------
        # DEFAULT Instructions if no tag is provided
        # ------------------------------------------
        else:
            line_unicode = latex_to_unicode(line)                 # convert text to unicode symbols if needed
            line_bold    = latex_bold_to_markdown(line_unicode)   # bold text
            # print(line_bold)
            st.markdown(line_bold)                                # markdown that line

        # ------------------------------------------
        # Increment line counter / go to next line
        # ------------------------------------------
        idx += 1
    # END Parse text

    return
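# -------------------------------------------------------------------------
# For orientation (illustrative only, the sample text below is invented):
# a lesson fragment fed to format_text() mixes plain prose, backslash symbol
# tags, display math delimited by \[ ... \] on their own lines, and Cognitive
# Checkpoint blocks, e.g.
#
#   The sample mean \mu is compared with \textbf{the median}.
#   \[
#   \bar{x} = \frac{1}{n} \sum_{i=1}^{n} x_i
#   \]
#   Cognitive Checkpoint:  CHKPT_01
#   Explain when the mean and the median of a sample differ.
#   End Cognitive Checkpoint:
#
# (The exact placement of the 8-character key after "Cognitive Checkpoint:" is
#  an assumption; the parser reads it from line[23:31].)
# -------------------------------------------------------------------------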
# =====================================================
# Load Content
# =====================================================
def load_content(module):

    input_directory = os.path.join(st.session_state["start_dir"], "pages", "modules", module)

    # if file exists - otherwise do nothing
    content_str = open(os.path.join(input_directory, "Overview.txt"), "r", encoding="UTF-8").read()
    tab_content = content_str.split("---\n")

    # Display Module title if one is given, otherwise blank
    title = tab_content[0].strip()
    if len(title) > 0:
        st.subheader(title)

    # Draw tabs according to page count
    tab_list = ["🚀" + str(idx+1) for idx, x in enumerate(tab_content[1:])]
    tabs = st.tabs(tab_list)

    # For each tab, render the appropriate content
    for idx, tab in enumerate(tabs):
        with tab:
            format_text(tab_content[idx+1], input_directory)

    # Add AI Bot Interface to bottom of page
    ## if prompt := st.chat_input("I would like to learn about ..."):
    ##     AI_gen_response(prompt=prompt)

    return
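# -------------------------------------------------------------------------
# Expected layout of pages/modules/<module>/Overview.txt, inferred from
# load_content() above (the sample text is invented): the block before the
# first "---" is the subheader title, and each later block becomes one tab.
#
#   Module Title Shown As Subheader
#   ---
#   content rendered in tab 🚀1 ...
#   ---
#   content rendered in tab 🚀2 ...
# -------------------------------------------------------------------------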
# =====================================================
# Load Page
# =====================================================
def load_page(module):

    if "logged_in" not in st.session_state:     # User is not logged in - don't load content
        return

    # --------------------------------------------------
    # Sidebar
    # --------------------------------------------------
    asset_dir = os.path.join(st.session_state["start_dir"], "pages", "assets")

    with st.sidebar:

        with st.expander("Lessons", expanded=True, icon=":material/library_books:"):
            st.page_link("pages/Overview.py",  label="The ASCEND User Interface", icon=":material/rocket_launch:")
            st.page_link("pages/Module_01.py", label="Basic Training",            icon=":material/school:")
            st.page_link("pages/Module_02.py", label="Mathematical Foundations",  icon=":material/difference:")
            st.page_link("pages/Module_03.py", label="The Normal Distribution",   icon=":material/cadence:")
            st.page_link("pages/Module_04.py", label="One Sided Tests",           icon=":material/compare:")
            st.page_link("pages/Module_05.py", label="Tests for Two Samples",     icon=":material/speed_2x:")
            st.page_link("pages/Module_06.py", label="Paired Tests",              icon=":material/speed_2x:")
            st.page_link("pages/Module_07.py", label="Testing Independence",      icon=":material/superscript:")
            st.page_link("pages/Module_08.py", label="Testing Frequencies",       icon=":material/superscript:")
            st.page_link("pages/Module_09.py", label="Summary",                   icon=":material/format_list_numbered:")
            st.page_link("pages/Bonus.py",     label="Bonus Content",             icon="🎁")

        with st.expander("Ask the AI", expanded=False, icon=":material/smart_toy:"):
            if AI_question := st.text_input(label="Ask the AI", value="", key="AI_Question"):
                try:
                    AI_gen_response(prompt=AI_question)
                except Exception as e:
                    print("ERROR: Dialog box open. Message suppressed\n", str(e))

            # Callback function to clear text from previous AI question inputs
            def set_name(new_text):
                st.session_state.AI_Question = new_text
            st.button('Clear', on_click=set_name, args=[''])

        with st.expander("R Calculator", expanded=False, icon=":material/calculate:"):
            # (the HTML/iframe string for the embedded calculator is missing from this copy)
            components.html("", height=550, width=550, scrolling=True)

        # Streamlit 1.47 is required to run expanders within expanders
        #with st.expander("Statistical Tests", expanded=False, icon=":material/functions:"):
        #    overview_dir = os.path.join(st.session_state["start_dir"],"pages","assets")
        #    overview_str = open(os.path.join(overview_dir,"formulas.txt"), "r", encoding="UTF-8").read()
        #    format_text(overview_str, overview_dir)

        with st.expander("Your Progress", expanded=False, icon=":material/preliminary:"):
            progress_str = open(os.path.join(st.session_state["start_dir"], "pages", "assets", "progress.txt"),
                                "r", encoding="UTF-8").read()
            st.markdown(progress_str)

        with st.expander("Logout", expanded=False, icon=":material/logout:"):
            if st.button("Logout"):
                del st.session_state["logged_in"]
                st.switch_page("app.py")        # switch to main page
                st.rerun()                      # should invoke login dialog

    # END Sidebar

    # --------------------------------------------------
    # Main Screen Content - e.g. Pages with Tabs
    # --------------------------------------------------
    if "logged_in" not in st.session_state:     # check if user has logged out with sidebar
        pass
    else:
        load_content(module)                    # otherwise load and provide access to the content

# =================================================
# Standalone Code if Program is Run as Main
# =================================================
if __name__ == "__main__":
    pass
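# -------------------------------------------------------------------------
# Presumed usage (a sketch, not code from this repository): each lesson page
# under pages/ is assumed to import this module and call load_page() with its
# own module folder name, e.g.
#
#   # pages/Module_01.py  (hypothetical contents)
#   import load_pages                     # module name taken from the DEBUG note in test_load()
#   load_pages.load_page("Module_01")     # folder looked up under pages/modules/ by load_content()
#
# The app itself is presumably started with `streamlit run app.py`.
# -------------------------------------------------------------------------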