import streamlit as st
import pandas as pd
import logging
import json
from dotenv import load_dotenv

import modeling
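
# Streamlit front end for construct-specific automatic item generation: the user
# first accepts a disclaimer (show_launch) and is then shown the demo (show_demo).
# The local `modeling` module is assumed to expose load_model() and
# generate_items(); its implementation is not part of this file.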


def show_launch(placeholder):
    with placeholder.container():
        st.divider()
        st.markdown("""
            ## Before Using the App
            ### Disclaimer
            This application is provided as-is, without any warranty or guarantee of any kind, expressed or implied.
            It is intended for educational, non-commercial use only. The developers of this app shall not be held
            liable for any damages or losses incurred from its use. By using this application, you agree to the
            terms and conditions outlined herein and acknowledge that any commercial use or reliance on its
            functionality is strictly prohibited.
        """, unsafe_allow_html=True)

        button_placeholder = st.empty()

        if button_placeholder.button(label='Accept Disclaimer', type='primary', use_container_width=True):
            st.session_state.show_launch = False
            placeholder.empty()
            button_placeholder.empty()


def show_demo(placeholder):
    with placeholder:
        with st.container():
            st.divider()
            st.markdown("""
                ## Try it yourself!
                Use the input fields provided below to create items aimed at
                assessing a particular psychological construct (e.g., a personality
                trait). If desired, use the prefix option to generate items that
                begin with a predetermined string. To control the diversity of the
                output, various sampling strategies may be applied. For further
                information on these strategies, please refer to the accompanying
                paper.
            """)
            modeling.load_model()

            sampling_options = ['Greedy Search', 'Beam Search', 'Multinomial Sampling']
            sampling_input = st.radio('Sampling', options=sampling_options, index=2, horizontal=True)

            left_col, right_col = st.columns([1, 1])

            with left_col:
                prefix_input = st.text_input('Prefix', '')
                construct_input = st.text_input('Construct', 'Pessimism')

            with right_col:
                # Greedy search: deterministic decoding; the sampling parameters are inert.
                if sampling_options.index(sampling_input) == 0:
                    num_beams = 1
                    num_return_sequences = 1
                    temperature = 1
                    top_k = 0
                    top_p = 1
                # Beam search: keep several candidate sequences and return the best ones.
                elif sampling_options.index(sampling_input) == 1:
                    num_beams = st.slider('Number of Search Beams', min_value=1, max_value=10, value=3, step=1)
                    num_return_sequences = st.slider('Number of Beams to Return', min_value=1, max_value=10, value=2, step=1)
                    temperature = 1
                    top_k = 0
                    top_p = 1
                # Multinomial sampling: stochastic decoding controlled by temperature, top-k, and top-p.
                elif sampling_options.index(sampling_input) == 2:
                    num_beams = 1
                    num_return_sequences = 1
                    temperature = st.slider('Temperature', min_value=0.1, max_value=1.5, value=1.0, step=0.1)
                    top_k = st.slider('Top k (0 = disabled)', min_value=0, max_value=1000, value=40, step=1)
                    top_p = st.slider('Top p (0 = disabled)', min_value=0.0, max_value=1.0, value=0.95, step=0.05)
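
            # Note: the decoding parameters above (num_beams, temperature, top_k, top_p)
            # are passed through to modeling.generate_items() below; they are assumed to
            # correspond to the sampling strategies described in the accompanying paper.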

            message = st.empty()

            if st.button(label='Generate Item', type='primary', use_container_width=True):
                if num_return_sequences <= num_beams:
                    if len(construct_input) > 0:
                        kwargs = {
                            'num_return_sequences': num_return_sequences,
                            'num_beams': num_beams,
                            'do_sample': sampling_options.index(sampling_input) == 2,
                            'temperature': temperature,
                            'top_k': top_k,
                            'top_p': top_p
                        }
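                        # generate_items() is assumed to return a list of generated item
                        # stems (one per returned sequence), which is why the result is
                        # stored under 'item' and exploded into rows further below.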
                        item_stems = modeling.generate_items(construct_input, prefix_input, **kwargs)
                        st.session_state.outputs.append({'construct': construct_input, 'item': item_stems})
                    else:
                        message.error('You have to enter a construct to proceed with item generation!')
                else:
                    message.error('You cannot return more beams than you search for!')

            if len(st.session_state.outputs) > 0:
                tab1, tab2 = st.tabs(["Generated Items", "Details on last prompt"])
                with tab1:
                    # One row per generated item, most recent generations first.
                    placeholder_outputs = st.empty()
                    df = pd.DataFrame(st.session_state.outputs).explode(column='item').reset_index()
                    placeholder_outputs.dataframe(df.sort_values(by='index', ascending=False), use_container_width=True)
                with tab2:
                    pass


def initialize():
    load_dotenv()
    logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)

    if 'state_loaded' not in st.session_state:
        st.session_state['state_loaded'] = True
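        # init.json is assumed to contain the initial session-state values the app
        # relies on, e.g. an empty 'outputs' list that generated items are appended to.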
        with open('init.json') as json_data:
            st.session_state.update(json.load(json_data))


def main():
    st.set_page_config(page_title='Construct-Specific Automatic Item Generation')

    col1, col2 = st.columns([2, 5])
    with col1:
        st.image('logo-130x130.svg')
    with col2:
        st.markdown("# Construct-Specific Automatic Item Generation")

    st.markdown("""
        This web application showcases item generation for psychological scale development
        using natural language processing ("AI"), accompanying the paper
        "Transformer-Based Deep Neural Language Modeling for Construct-Specific Automatic Item Generation".

        📄 Paper (Open Access): https://link.springer.com/article/10.1007/s11336-021-09823-9

        💾 Data: https://osf.io/rhe9w/

        🗞️ Cite:<br> Hommel, B. E., Wollang, F.-J. M., Kotova, V., Zacher, H., & Schmukle, S. C. (2022). Transformer-Based Deep Neural Language Modeling for Construct-Specific Automatic Item Generation. Psychometrika, 87(2), 749–772. https://doi.org/10.1007/s11336-021-09823-9

        #️⃣ Twitter/X: https://twitter.com/BjoernHommel

        The web application is maintained by [magnolia psychometrics](https://www.magnolia-psychometrics.com/).
    """, unsafe_allow_html=True)

    placeholder_launch = st.empty()
    placeholder_demo = st.empty()
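
    # On the first run, only the disclaimer is rendered; accepting it triggers a
    # rerun, at which point 'disclaimer' is already set and the demo is shown.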
    if 'disclaimer' not in st.session_state:
        show_launch(placeholder_launch)
        st.session_state['disclaimer'] = True
    else:
        show_demo(placeholder_demo)


if __name__ == '__main__':
    initialize()
    main()