import streamlit as st
from streamlit_option_menu import option_menu
from transformers import pipeline, Conversation
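# The pipelines below are kept commented out so that every large model is not
# downloaded and loaded at startup; uncomment the ones needed for the
# corresponding menu entries in dashboard().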
# convo = pipeline(task="conversational", model="microsoft/DialoGPT-medium")
# imgclassifier = pipeline(model="microsoft/beit-base-patch16-224-pt22k-ft22k")
# qnabot = pipeline(task="question-answering", model="distilbert-base-cased-distilled-squad")
# txtgen = pipeline(task="text-generation", model="EleutherAI/gpt-neo-2.7B")
# txtclassifi = pipeline(task="text-classification", model="nlptown/bert-base-multilingual-uncased-sentiment")
# summurize = pipeline(task="summarization", model="sshleifer/distilbart-cnn-12-6")
# visualqna = pipeline(task="vqa", model="microsoft/DialoGPT-medium")
visualqna = pipeline(model="dandelin/vilt-b32-finetuned-vqa")
def load_image():
    with st.sidebar:
        # A manually entered URL takes priority; otherwise fall back to one of the sample images.
        img = st.text_input("Enter Image URL") or st.selectbox(
            "Select Image",
            (
                "https://images.unsplash.com/photo-1593466144596-8abd50ad2c52?ixlib=rb-4.0.3&ixid=M3wxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8fA%3D%3D&auto=format&fit=crop&w=3434&q=80",
                "https://images.unsplash.com/photo-1566438480900-0609be27a4be?ixlib=rb-4.0.3&ixid=M3wxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8fA%3D%3D&auto=format&fit=crop&w=3394&q=80",
            ),
        )
        if img:
            if st.button("Load Image"):
                st.write("Image loaded!")
                st.image(img)
        else:
            st.warning("Please enter an image URL or pick a sample image before asking a question.")
    return img
# def homepage():
# st.write("Timeline")
# # allmessages =[]
# if "messages" not in st.session_state:
# st.session_state.messages = []
# if usrmsg := st.chat_input("Share a thought"):
# st.session_state.messages.append(usrmsg)
# with st.chat_message("user"):
# st.session_state.messages
# def chat():
# st.title("Chit-Chatbot")
# if query := st.chat_input("Enter your message"):
# uquery = Conversation(query)
# response = convo(uquery)
# with st.chat_message("assistant"):
# st.write(response.generated_responses[-1])
# def image_classifi():
# st.title("Image Classification")
# file = st.text_input("Enter Image URL")
# output = imgclassifier(file)
# if st.button("View Results"):
# st.write(output)
# def qna_bot():
# st.title("Q&A-Chatbot")
# if query := st.chat_input("Enter your message"):
# response = qnabot(query)
# with st.chat_message("assistant"):
# st.write(response)
# def txt_gen():
# st.title("Text Generation")
# if query := st.chat_input("Enter your message"):
# response = txtgen(query)
# with st.chat_message("assistant"):
# st.write(response)
# def txt_classifi():
# st.title("Text Classification")
# if query := st.chat_input("Enter your message"):
# response = txtclassifi(query,)
# with st.chat_message("assistant"):
# st.write(response)
# def summury():
# st.title("Summury")
# if query := st.chat_input("Enter your message"):
# response = summurize(query, min_length=5, max_length=20)
# with st.chat_message("assistant"):
# st.write(response)
def visual_qna():
st.title("Visual Q&A")
img = load_image()
if img:
if query := st.chat_input("Enter your message"):
response = visualqna(question=query, image=img)
with st.chat_message("assistant"):
st.write(response)
else:
st.warning("Please enter an image URL and click 'Load Image' before asking a question.")
def dashboard():
with st.sidebar:
        selected = option_menu(None, ['Conversational', "Q&A", "Text Generation", "Text Classification", "Image Classification", "Summarization", "Visual Q&A", "Logout"],
icons=['πŸ’¬','❓', 'πŸ“', 'πŸ”€', 'πŸ–ΌοΈ', 'πŸ“‘', 'πŸ”Ž', 'πŸ”“'])
# if selected == 'Home':
# homepage()
if selected == 'Visual Q&A':
visual_qna()
# elif selected == "Image Classification":
# image_classifi()
elif selected == 'Logout':
st.session_state.user = None
st.experimental_rerun()
# elif selected == "Invoke Document":
# invoke_document()
# elif selected == "Invoke Audio":
# invoke_audio()
# elif selected == "Invoke Video":
# invoke_video()
# elif selected == "Invoke Image":
# invoke_image()
# elif selected == "Invoke Text":
# invoke_text()