import streamlit as st
import streamlit.components.v1 as components
from PIL import Image
from predict import generate_text
from model import load_clip_model, load_gpt_model, load_model
# Configure Streamlit page
st.set_page_config(page_title="Caption Machine", page_icon="πŸ’₯")
# Load the model, image transform, and tokenizer up front
model, image_transform, tokenizer = load_model()
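# Streamlit reruns this script on every interaction; storing the loaded
# components in st.session_state keeps them available across reruns.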
if 'model' not in st.session_state:
    st.session_state['model'] = model

if 'image_transform' not in st.session_state:
    st.session_state['image_transform'] = image_transform

if 'tokenizer' not in st.session_state:
    st.session_state['tokenizer'] = tokenizer
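# Alternative sketch (not part of the original app): assuming Streamlit >= 1.18,
# the load could instead be wrapped in st.cache_resource so it runs only once
# per server process, e.g.:
#
#     @st.cache_resource
#     def get_captioning_model():  # hypothetical helper name
#         return load_model()
#
#     model, image_transform, tokenizer = get_captioning_model()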
# Force responsive layout for columns also on mobile
st.write(
    """<style>
    [data-testid="column"] {
        width: calc(50% - 1rem);
        flex: 1 1 calc(50% - 1rem);
        min-width: calc(50% - 1rem);
    }
    </style>""",
    unsafe_allow_html=True,
)
# Render Streamlit page
st.title("Image Captioner")
st.markdown(
    "This app generates image captions using OpenAI's [GPT-2](https://openai.com/research/better-language-models) and [CLIP](https://openai.com/research/clip) models."
)
# st.subheader("Model Architecture")
# image = Image.open('model.png')
# st.image(image, caption=None, width=500)
upload_file = st.file_uploader("Upload an image:", type=['png','jpg','jpeg'])
# Generate and display a caption once an image has been uploaded
if upload_file is not None:
    img = Image.open(upload_file)
    st.image(img)
    st.write("Image uploaded successfully")

    # gpt_model, tokenizer = load_gpt_model()
    caption = generate_text(
        st.session_state['model'], img,
        st.session_state['tokenizer'], st.session_state['image_transform']
    )
    st.write(caption)