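"""
Streamlit demo for image captioning with Spark NLP's
VisionEncoderDecoderForImageCaptioning (a ViT image encoder paired with a
GPT2 text decoder). Pick a bundled sample image or upload your own, and the
app displays the generated caption.
"""
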
import os

import streamlit as st

import sparknlp
from sparknlp.base import *
from sparknlp.annotator import *
from pyspark.ml import Pipeline

st.set_page_config(
    layout="wide",
    initial_sidebar_state="auto"
)

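# Inline CSS used by the HTML snippets rendered with st.markdown below.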
st.markdown("""
<style>
.main-title {
    font-size: 36px;
    color: #4A90E2;
    font-weight: bold;
    text-align: center;
}
.section {
    background-color: #f9f9f9;
    padding: 10px;
    border-radius: 10px;
    margin-top: 10px;
}
.section p, .section ul {
    color: #666666;
}
</style>
""", unsafe_allow_html=True)

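# Cache the Spark session and the pipeline so they are created once per
# server process rather than on every Streamlit rerun.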
@st.cache_resource
def init_spark():
    return sparknlp.start()

@st.cache_resource
def create_pipeline(model):
    imageAssembler = ImageAssembler() \
        .setInputCol("image") \
        .setOutputCol("image_assembler")

    # Load the pretrained encoder-decoder chosen in the sidebar.
    imageCaptioning = VisionEncoderDecoderForImageCaptioning \
        .pretrained(model) \
        .setBeamSize(2) \
        .setDoSample(False) \
        .setInputCols(["image_assembler"]) \
        .setOutputCol("caption")

    pipeline = Pipeline(stages=[imageAssembler, imageCaptioning])
    return pipeline

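# LightPipeline executes the fitted pipeline locally on the driver, which is
# much faster than a distributed job for a single image.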
def fit_data(pipeline, data):
    # All annotators are pretrained, so fitting on an empty DataFrame simply
    # materializes the PipelineModel.
    empty_df = spark.createDataFrame([['']]).toDF('text')
    model = pipeline.fit(empty_df)
    light_pipeline = LightPipeline(model)
    annotations_result = light_pipeline.fullAnnotateImage(data)
    return annotations_result[0]['caption'][0].result

def save_uploadedfile(uploadedfile):
    # Write the upload to disk so fullAnnotateImage can read it by file path.
    filepath = os.path.join(IMAGE_FILE_PATH, uploadedfile.name)
    with open(filepath, "wb") as f:
        if hasattr(uploadedfile, 'getbuffer'):
            f.write(uploadedfile.getbuffer())
        else:
            f.write(uploadedfile.read())

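# Sidebar: model choice and a link to the reference notebook.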
model_list = ['image_captioning_vit_gpt2']
model = st.sidebar.selectbox(
    "Choose the pretrained model",
    model_list,
    help="For more info about the models visit: https://sparknlp.org/models"
)

st.markdown('<div class="main-title">VisionEncoderDecoder For Image Captioning</div>', unsafe_allow_html=True)

link = """
<a href="https://colab.research.google.com/github/JohnSnowLabs/spark-nlp/blob/master/examples/python/annotation/image/VisionEncoderDecoderForImageCaptioning.ipynb">
    <img src="https://colab.research.google.com/assets/colab-badge.svg" style="zoom: 1.3" alt="Open In Colab"/>
</a>
"""
st.sidebar.markdown('Reference notebook:')
st.sidebar.markdown(link, unsafe_allow_html=True)

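# Directory of bundled sample images; uploads are saved here as well.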
IMAGE_FILE_PATH = "inputs"
image_files = sorted(
    file for file in os.listdir(IMAGE_FILE_PATH)
    if file.lower().endswith(('.png', '.jpg', '.jpeg'))
)

img_options = st.selectbox("Select an image", image_files)
uploadedfile = st.file_uploader("Try it for yourself!")

# An uploaded image takes priority over the dropdown selection.
if uploadedfile:
    save_uploadedfile(uploadedfile)
    selected_image = os.path.join(IMAGE_FILE_PATH, uploadedfile.name)
elif img_options:
    selected_image = os.path.join(IMAGE_FILE_PATH, img_options)

st.subheader('Selected Image')

image_size = st.slider('Image Size', 400, 1000, value=400, step=100)

# selected_image already includes the directory prefix.
st.image(selected_image, width=image_size)

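# Start Spark, build the pipeline, and caption the selected image.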
st.subheader('Caption')

spark = init_spark()
pipeline = create_pipeline(model)
output = fit_data(pipeline, selected_image)

st.markdown(f'Generated caption: **{output}**')