# Inpaint_Me / app_inpaint.py

import streamlit as st
import time
import easyocr
import math
from pathlib import Path
from PIL import Image, ImageDraw
import PIL
import io
import os
import random
import matplotlib.pyplot as plt
import cv2
# from google.colab.patches import cv2_imshow
import numpy as np
from tqdm.auto import tqdm
import shutil
import base64
import logging

logging.basicConfig(format='%(asctime)s - %(message)s',
                    datefmt='%Y-%m-%d %H:%M:%S',
                    level=logging.INFO,
                    )


@st.cache(show_spinner=False, allow_output_mutation=True, suppress_st_warning=True)
def load_models():
    # Load the EasyOCR reader once and cache it. The languages to extract are
    # given as a list of short language codes; only English ('en') is loaded here.
    reader = easyocr.Reader(['en'])
    return reader


reader = load_models()
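
# Note (an assumption, not part of the original app): to also detect Spanish text,
# the reader could be created as easyocr.Reader(['en', 'es']); each additional
# language increases model download and load time.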


def midpoint(x1, y1, x2, y2):
    x_mid = int((x1 + x2) / 2)
    y_mid = int((y1 + y2) / 2)
    return (x_mid, y_mid)


def inpaint_text(img, text_coordinates):
    # Build a mask covering every detected text box, then inpaint the masked
    # pixels with the Navier-Stokes based algorithm.
    mask = np.zeros(img.shape[:2], dtype="uint8")
    for box in text_coordinates:
        # EasyOCR returns the four corners of each box:
        # top-left, top-right, bottom-right, bottom-left.
        x0, y0 = box[0]
        x1, y1 = box[1]
        x2, y2 = box[2]
        x3, y3 = box[3]
        # Draw a thick line through the middle of the box onto the mask.
        x_mid0, y_mid0 = midpoint(x1, y1, x2, y2)
        x_mid1, y_mid1 = midpoint(x0, y0, x3, y3)
        thickness = int(math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2))
        cv2.line(mask, (x_mid0, y_mid0), (x_mid1, y_mid1), 255, thickness)
    img = cv2.inpaint(img, mask, 7, cv2.INPAINT_NS)
    return img
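

# Optional visualisation helper (a sketch, not part of the original app and not
# called anywhere below): it draws the quadrilaterals returned by EasyOCR so the
# regions fed to inpaint_text() can be inspected before inpainting.
def draw_text_boxes(img, text_coordinates, color=(0, 255, 0), thickness=2):
    # Draw each detected text box on a copy of the image and return the copy.
    preview = img.copy()
    for box in text_coordinates:
        pts = np.array(box, dtype=np.int32).reshape((-1, 1, 2))
        cv2.polylines(preview, [pts], True, color, thickness)  # True = closed polygon
    return preview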


def file_selector(folder_path='.'):
    # Let the user pick one of the files in folder_path from a selectbox.
    filenames = os.listdir(folder_path)
    selected_filename = st.selectbox('Select a file', filenames)
    return os.path.join(folder_path, selected_filename)


st.set_page_config(
    page_title="Inpaint Me",
    page_icon=":art:",
    layout="wide",
    initial_sidebar_state="expanded",
    menu_items={
        'Get Help': 'https://www.extremelycoolapp.com/help',
        'Report a bug': "https://www.extremelycoolapp.com/bug",
        'About': "# This is a header. This is an *extremely* cool app!"
    }
)

st.markdown(
    """
    <style>
    .logo-img {
        margin-top: auto;
        margin-left: 30%;
        width: 50%;
    }
    .logo-img-2 {
        margin-top: auto;
        margin-left: 20%;
        width: 35%;
    }
    </style>
    """,
    unsafe_allow_html=True
)

LOGO_IMAGE = "inpaint_me_logo.png"
col1, col2 = st.columns([2, 2])
with col1:
    # st.image('./aida_logo.png')
    st.markdown(
        f"""
        <img class="logo-img" src="data:image/png;base64,{base64.b64encode(open(LOGO_IMAGE, "rb").read()).decode()}">
        """,
        unsafe_allow_html=True
    )
with col2:
    # st.image('./aida_logo.png')
    st.markdown(
        f"""
        <img class="logo-img-2" src="data:image/png;base64,{base64.b64encode(open("aida_logo.png", "rb").read()).decode()}">
        """,
        unsafe_allow_html=True
    )

st.header("")

with st.expander("Project Description", expanded=False):
    st.write("""
    Developed by the Applied Intelligence and Data Analysis ([AI+DA](http://aida.etsisi.upm.es/)) group at the Polytechnic University of Madrid (UPM).
    Removing text from images helps rule out the possibility of text misleading image Deep Learning models (e.g., CNNs). Hence,
    this tool uses [EasyOCR](https://github.com/JaidedAI/EasyOCR) and [OpenCV](https://pypi.org/project/opencv-python/) to detect text and inpaint it. Currently, only `JPG` files are supported. This tool has been tested on memes; feel free to try some examples or upload your own images.
    """)

filename_example = None
if st.checkbox('Select an example'):
    folder_path = './Examples/'
    # if st.checkbox('Change directory'):
    #     folder_path = st.text_input('Enter folder path', '.')
    filename_example = file_selector(folder_path=folder_path)
    st.write('You selected `%s`' % filename_example)

uploaded_file = st.file_uploader(label="Upload image",
                                 type="jpg",
                                 accept_multiple_files=False,
                                 key=None,
                                 help=None,
                                 on_change=None,
                                 args=None,
                                 kwargs=None,
                                 )

col1, col2, col3 = st.columns([2, 0.5, 2])

if filename_example:
    with col1:
        st.header("Original")
        img = Image.open(filename_example)
        st.image(img, caption=None, width=None, use_column_width=None, clamp=False, channels="RGB", output_format="auto")
    with col3:
        st.header("Inpainted")
        with st.spinner('Wait for it...'):
            img_array = np.array(Image.open(filename_example))
            # Detect text; with detail=1 each result is (box coordinates, detected text, confidence score).
            bounds = reader.readtext(img_array, detail=1)
            text_coordinates = [bound[0] for bound in bounds]
            # Inpaint the detected text regions.
            inpaint_image = inpaint_text(img_array, text_coordinates)
            st.image(inpaint_image, caption=None, width=None, use_column_width=None, clamp=False, channels="RGB", output_format="auto")

if uploaded_file:
    with col1:
        st.header("Original")
        st.image(uploaded_file, caption=None, width=None, use_column_width=None, clamp=False, channels="RGB", output_format="auto")
    with col3:
        st.header("Inpainted")
        with st.spinner('Wait for it...'):
            # Convert the uploaded file to bytes, then to a NumPy array.
            bytes_data = uploaded_file.getvalue()
            img_array = np.array(Image.open(io.BytesIO(bytes_data)))
            # Detect text; with detail=1 each result is (box coordinates, detected text, confidence score).
            bounds = reader.readtext(img_array, detail=1)
            text_coordinates = [bound[0] for bound in bounds]
            # Inpaint the detected text regions.
            inpaint_image = inpaint_text(img_array, text_coordinates)
            st.image(inpaint_image, caption=None, width=None, use_column_width=None, clamp=False, channels="RGB", output_format="auto")
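

# Refactor sketch (an assumption, not part of the original app): the example and
# upload branches above run the same detect-and-inpaint steps, which could be
# factored into a single helper such as this hypothetical one.
def detect_and_inpaint(img_array):
    # Detect text boxes with EasyOCR, then erase them with inpaint_text().
    bounds = reader.readtext(img_array, detail=1)
    text_coordinates = [bound[0] for bound in bounds]
    return inpaint_text(img_array, text_coordinates)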