import os
import threading

import streamlit as st
# In newer Streamlit releases this helper moved to streamlit.runtime.scriptrunner.
from streamlit.scriptrunner import add_script_run_ctx

import audiobot
import chatbot
from model import predictor


def runInThread():
    # Load the heavy VQA model in a background thread so the UI stays responsive.
    print('Initialize model in thread')
    st.session_state['predictor'] = predictor.Predictor()
    print('Model is initialized')


def run():
    st.set_page_config(
        page_title='Welcome to Visual Question Answering - Bot',
        page_icon=':robot:',
        layout='wide'
    )
    os.environ['TOKENIZERS_PARALLELISM'] = 'false'

    # Start the model-loading thread once per session and attach the Streamlit
    # script-run context so the thread can write to st.session_state.
    if 'thread' not in st.session_state:
        st.session_state.thread = threading.Thread(target=runInThread)
        add_script_run_ctx(st.session_state.thread)
        st.session_state.thread.start()

    st.sidebar.title('VQA Bot')
    st.sidebar.write('''
        VQA Bot addresses the challenge of visual question answering with chat and
        voice assistance. Here, we merged a ViLT (Vision-and-Language Transformer)
        model fine-tuned on VQAv2 with T5-small (Text-to-Text Transfer Transformer).
        The language transformer was pretrained and fine-tuned to produce the
        desired results. Please use the radio buttons below to navigate.
    ''')

    # Sidebar navigation between the chat and voice interfaces.
    selected_page = st.sidebar.radio('Go to', ('VQA Chatbot', 'VQA Audiobot'))
    if selected_page == 'VQA Chatbot':
        chatbot.show()
    elif selected_page == 'VQA Audiobot':
        audiobot.show()

    st.caption("Created by Madhuri Sakhare - [Github](https://github.com/msak1612/vqa_chatbot) [Linkedin](https://www.linkedin.com/in/madhuri-sakhare/)")


run()
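
# ---------------------------------------------------------------------------
# Illustrative sketch (assumption): model/predictor.py is not shown in this
# file. Based on the sidebar description (ViLT fine-tuned on VQAv2 combined
# with T5-small), a minimal Predictor could look like the class below. The
# Hugging Face checkpoint names, the answer() signature, and the T5 prompt
# format are illustrative assumptions, not the project's actual implementation.
# ---------------------------------------------------------------------------
from PIL import Image
from transformers import (
    T5ForConditionalGeneration,
    T5Tokenizer,
    ViltForQuestionAnswering,
    ViltProcessor,
)


class SketchPredictor:
    def __init__(self):
        # ViLT answers the visual question with a short label from the VQAv2 space.
        self.vqa_processor = ViltProcessor.from_pretrained(
            'dandelin/vilt-b32-finetuned-vqa')
        self.vqa_model = ViltForQuestionAnswering.from_pretrained(
            'dandelin/vilt-b32-finetuned-vqa')
        # T5-small rewrites the short label into a conversational reply.
        self.t5_tokenizer = T5Tokenizer.from_pretrained('t5-small')
        self.t5_model = T5ForConditionalGeneration.from_pretrained('t5-small')

    def answer(self, image: Image.Image, question: str) -> str:
        # ViLT: image + question -> classification over VQAv2 answer labels.
        encoding = self.vqa_processor(image, question, return_tensors='pt')
        logits = self.vqa_model(**encoding).logits
        label = self.vqa_model.config.id2label[logits.argmax(-1).item()]
        # T5: turn the short label into a sentence (hypothetical prompt format).
        prompt = f'question: {question} answer: {label}'
        input_ids = self.t5_tokenizer(prompt, return_tensors='pt').input_ids
        output_ids = self.t5_model.generate(input_ids, max_new_tokens=32)
        return self.t5_tokenizer.decode(output_ids[0], skip_special_tokens=True)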