# streamlit-demo / app.py
# (Hugging Face Hub page chrome converted to comments so the file parses:
#  author: eagle0504 — commit: "first commit" (aec35e1) — size: 3.39 kB)
import os
import openai
import streamlit as st
from transformers import pipeline
from helpers.foundation_models import *
# OpenAI client for the ChatGPT task.  NOTE(review): this client is never
# referenced in this file — presumably `call_chatgpt` in helpers builds its
# own from the same env var; confirm before removing.
openai_client = openai.OpenAI(api_key=os.environ["OPENAI_API_KEY"])

st.title("🌟 Streamlit + Hugging Face Demo πŸ€–")

# Make sure the chat history survives Streamlit reruns.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay the stored conversation each time the script reruns.
for msg in st.session_state.messages:
    with st.chat_message(msg["role"]):
        st.markdown(msg["content"])
# Collapsible usage instructions.  The original wrapped a *main-area*
# expander around a *sidebar* write, which rendered an empty expander in
# the page body while the text appeared unconditionally in the sidebar.
# Rendering the expander directly in the sidebar gives the intended
# collapsible sidebar panel.
with st.sidebar.expander("Instructions"):
    st.markdown(
        r"""
# 🌟 Streamlit + Hugging Face Demo πŸ€–
## Introduction πŸ“–
This demo showcases how to interact with Large Language Models (LLMs) on Hugging Face using Streamlit.
## Setup πŸ› οΈ
1. Install Requirements:
   - Streamlit: `pip install streamlit`
   - Hugging Face Transformers: `pip install transformers`
## Running the Demo πŸš€
1. Clone the repository: `git clone <repo-url>`
2. Navigate to the project directory: `cd <project-directory>`
3. Run Streamlit: `streamlit run app.py`
## Features 🌈
- **Text Input** πŸ“: Enter your query in the text box.
- **Model Selection** πŸ€–: Choose an LLM from a dropdown menu.
- **Submit Button** βœ…: Click to submit your query to the model.
- **Responses** πŸ’¬: View the model's responses in real-time.
## Contributing 🀝
Feel free to fork the repository, make changes, and submit pull requests!
## License πŸ“œ
This project is licensed under the MIT License.
## Contact πŸ“¬
For any queries, contact us at `email@example.com`.
## Happy Coding! πŸŽ‰
"""
    )
# Sidebar task selector — drives the dispatch in the chat handler below.
option = st.sidebar.selectbox(
    "Which task do you want to do?",
    ("Sentiment Analysis", "Medical Summarization", "ChatGPT"),
)

# Reset the stored conversation when the user clicks the clear button.
if st.sidebar.button("Clear Conversation", key="clear"):
    st.session_state.messages = []
# React to user input: echo it, run the selected task, show the result.
if prompt := st.chat_input("What is up?"):
    # Display the user message and record it in the history.
    st.chat_message("user").markdown(prompt)
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Dispatch on the sidebar selection.  The walrus above guarantees
    # `prompt` is non-empty, so the original's inner `if prompt:` checks
    # were redundant.  NOTE(review): both HF pipelines are rebuilt on
    # every message — consider wrapping them in @st.cache_resource.
    if option == "Sentiment Analysis":
        out = pipeline("sentiment-analysis")(prompt)
        response = f"""
Prompt: {prompt}
Sentiment: {out[0]["label"]}
Score: {out[0]["score"]}
"""
    elif option == "Medical Summarization":
        summarizer = pipeline(
            "summarization", model="Falconsai/medical_summarization"
        )
        response = summarizer(prompt)[0]["summary_text"]
    elif option == "ChatGPT":
        response = call_chatgpt(query=prompt)
    else:
        # Defensive fallback: the original's `else: None` left `doc`
        # undefined and would have raised NameError.  Unreachable today
        # because the selectbox only offers the three options above.
        response = "Unsupported task selected."

    # Display the assistant response and record it in the history.
    with st.chat_message("assistant"):
        st.markdown(response)
    st.session_state.messages.append({"role": "assistant", "content": response})