import os

import openai
import streamlit as st
from transformers import pipeline

from helpers.foundation_models import call_chatgpt

# The OpenAI client requires the OPENAI_API_KEY environment variable to be set.
openai_client = openai.OpenAI(api_key=os.environ["OPENAI_API_KEY"])
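
# `call_chatgpt` is imported from helpers.foundation_models and its source is
# not shown in this file. For reference, a minimal sketch of what such a helper
# might do with the client above; this is an illustrative assumption (including
# the model name), not the actual helper:
def _call_chatgpt_sketch(query: str) -> str:
    """Send a single-turn prompt to the OpenAI Chat Completions API (sketch)."""
    completion = openai_client.chat.completions.create(
        model="gpt-3.5-turbo",  # assumed model; the real helper may differ
        messages=[{"role": "user", "content": query}],
    )
    return completion.choices[0].message.content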
st.title("Streamlit + Hugging Face Demo")
# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
# Show the instructions in a collapsible expander in the sidebar
with st.sidebar.expander("Instructions"):
    st.markdown(
        r"""
# Streamlit + Hugging Face Demo

## Introduction

This demo showcases how to interact with Large Language Models (LLMs) on Hugging Face using Streamlit.

## Setup

1. Install the requirements:
   - Streamlit: `pip install streamlit`
   - Hugging Face Transformers: `pip install transformers`
   - OpenAI: `pip install openai`

## Running the Demo

1. Clone the repository: `git clone <repo-url>`
2. Navigate to the project directory: `cd <project-directory>`
3. Set your OpenAI API key: `export OPENAI_API_KEY=<your-key>`
4. Run Streamlit: `streamlit run app.py`

## Features

- **Text Input**: Enter your query in the chat box.
- **Task Selection**: Choose Sentiment Analysis, Medical Summarization, or ChatGPT from the sidebar dropdown.
- **Submit**: Press Enter in the chat box to send your query to the selected model.
- **Responses**: View the model's responses in real time.
- **Clear Conversation**: Reset the chat history from the sidebar.

## Contributing

Feel free to fork the repository, make changes, and submit pull requests!

## License

This project is licensed under the MIT License.

## Contact

For any queries, contact us at `email@example.com`.

## Happy Coding!
"""
    )
option = st.sidebar.selectbox(
    "Which task do you want to do?",
    ("Sentiment Analysis", "Medical Summarization", "ChatGPT"),
)
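
# The branches below build a new Hugging Face pipeline on every Streamlit
# rerun, which reloads model weights for each message. A cached loader is one
# way to avoid that cost; this is a sketch using st.cache_resource (available
# in Streamlit >= 1.18), shown for reference rather than wired into the
# branches below:
@st.cache_resource
def _load_pipeline_cached(task, model_name=None):
    """Build a transformers pipeline once and reuse it across reruns."""
    if model_name:
        return pipeline(task, model=model_name)
    return pipeline(task)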
clear_button = st.sidebar.button("Clear Conversation", key="clear")

# Reset the conversation history
if clear_button:
    st.session_state.messages = []
# React to user input
if prompt := st.chat_input("What is up?"):
    # Display user message in chat message container
    st.chat_message("user").markdown(prompt)
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})
    # Route the prompt to the selected task. `prompt` is guaranteed non-empty
    # here by the walrus check above, so no inner re-check is needed; `doc`
    # defaults to an empty string so it is always defined.
    doc = ""
    if option == "Sentiment Analysis":
        pipe_sentiment_analysis = pipeline("sentiment-analysis")
        out = pipe_sentiment_analysis(prompt)
        doc = f"""
            Prompt: {prompt}
            Sentiment: {out[0]["label"]}
            Score: {out[0]["score"]}
        """
    elif option == "Medical Summarization":
        pipe_summarization = pipeline(
            "summarization", model="Falconsai/medical_summarization"
        )
        out = pipe_summarization(prompt)
        doc = out[0]["summary_text"]
    elif option == "ChatGPT":
        doc = call_chatgpt(query=prompt)
    response = doc
    # Display assistant response in chat message container
    with st.chat_message("assistant"):
        st.markdown(response)
    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": response})
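
# Note: the history kept in st.session_state.messages uses the same
# {"role", "content"} dict schema that OpenAI chat messages use, so a
# multi-turn variant of the ChatGPT branch could pass the full history
# instead of a single prompt. Illustrative sketch only (the model name is
# an assumption):
def _chat_with_history_sketch(history):
    """Send the whole conversation to the Chat Completions API (sketch)."""
    completion = openai_client.chat.completions.create(
        model="gpt-3.5-turbo",  # assumed model
        messages=history,
    )
    return completion.choices[0].message.content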