import streamlit as st
from PIL import Image
from transformers import GPT2TokenizerFast, ViTImageProcessor, VisionEncoderDecoderModel

# Load the image captioning model and its corresponding tokenizer and image processor.
model = VisionEncoderDecoderModel.from_pretrained("jojo-ai-mst/image-vision-cap")
tokenizer = GPT2TokenizerFast.from_pretrained("jojo-ai-mst/image-vision-cap")
image_processor = ViTImageProcessor.from_pretrained("jojo-ai-mst/image-vision-cap")


def generate_caption(image):
    # Convert the uploaded file to a 3-channel PIL image before preprocessing.
    image = Image.open(image).convert("RGB")
    pixel_values = image_processor(image, return_tensors="pt").pixel_values
    # Autoregressively generate the caption (uses greedy decoding by default).
    generated_ids = model.generate(pixel_values)
    generated_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
    return generated_text


st.header("Welcome to Vision Caption Prototype", divider="rainbow")

uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])

st.divider()

if st.button("Generate Caption"):
    if uploaded_file is not None:
        st.header("Result")
        st.image(uploaded_file, caption=uploaded_file.name, use_column_width=True)
        caption = generate_caption(uploaded_file)
        st.subheader(caption)
    else:
        st.write("Please upload an image first.")
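
# Optional refinement (not part of the script above, a sketch assuming Streamlit >= 1.18):
# Streamlit reruns the whole script on every interaction, so wrapping the model load in a
# function decorated with st.cache_resource keeps the weights in memory across reruns
# instead of reloading them each time. The function name load_captioning_pipeline is
# illustrative, not from the original code.
#
# import streamlit as st
# from transformers import GPT2TokenizerFast, ViTImageProcessor, VisionEncoderDecoderModel
#
# @st.cache_resource
# def load_captioning_pipeline():
#     # Load once per process; subsequent reruns reuse the cached objects.
#     model = VisionEncoderDecoderModel.from_pretrained("jojo-ai-mst/image-vision-cap")
#     tokenizer = GPT2TokenizerFast.from_pretrained("jojo-ai-mst/image-vision-cap")
#     image_processor = ViTImageProcessor.from_pretrained("jojo-ai-mst/image-vision-cap")
#     return model, tokenizer, image_processor
#
# model, tokenizer, image_processor = load_captioning_pipeline()
#
# To try the app locally, assuming the script is saved as app.py (a hypothetical filename),
# run: streamlit run app.py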