## Daniel Buscombe, Marda Science LLC 2023
# This file contains functions adapted from Doodleverse (https://github.com/Doodleverse) programs
import gradio as gr
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from skimage.transform import resize
from skimage.io import imsave
from skimage.filters import threshold_otsu
from skimage.measure import EllipseModel, CircleModel, ransac
##========================================================
def fromhex(n):
    """Convert a hexadecimal string to an integer."""
    return int(n, base=16)
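# e.g. fromhex("ff") == 255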
##========================================================
def label_to_colors(
    img,
    mask,
    alpha,  # e.g. 128
    colormap,  # e.g. a class label colormap such as px.colors.qualitative.G10
    color_class_offset,  # e.g. 0
    do_alpha,  # e.g. True
):
"""
Take MxN matrix containing integers representing labels and return an MxNx4
matrix where each label has been replaced by a color looked up in colormap.
colormap entries must be strings like plotly.express style colormaps.
alpha is the value of the 4th channel
color_class_offset allows adding a value to the color class index to force
use of a particular range of colors in the colormap. This is useful for
example if 0 means 'no class' but we want the color of class 1 to be
colormap[0].
"""
colormap = [
tuple([fromhex(h[s : s + 2]) for s in range(0, len(h), 2)])
for h in [c.replace("#", "") for c in colormap]
]
cimg = np.zeros(img.shape[:2] + (3,), dtype="uint8")
minc = np.min(img)
maxc = np.max(img)
for c in range(minc, maxc + 1):
cimg[img == c] = colormap[(c + color_class_offset) % len(colormap)]
cimg[mask == 1] = (0, 0, 0)
    if do_alpha:
        return np.concatenate(
            (cimg, alpha * np.ones(img.shape[:2] + (1,), dtype="uint8")), axis=2
        )
    else:
        return cimg
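# Illustrative usage (hypothetical arrays `lbl` and `nodata`): for a 2-class MxN label image
# with values 0 and 1 and a boolean no-data mask,
#   label_to_colors(lbl, nodata, alpha=128, colormap=["#3366CC", "#DC3912"],
#                   color_class_offset=0, do_alpha=False)
# returns an MxNx3 uint8 RGB image with class 0 drawn in #3366CC, class 1 in #DC3912,
# and masked pixels in black.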
##====================================
def standardize(img):
    """Standardize image values (zero mean, unit variance) using an adjusted standard deviation."""
    N = np.shape(img)[0] * np.shape(img)[1]
    s = np.maximum(np.std(img), 1.0 / np.sqrt(N))
    m = np.mean(img)
    img = (img - m) / s
    if np.ndim(img) == 2:
        img = np.dstack((img, img, img))
    return img
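# For example, a uint8 image with mean 127 and standard deviation 40 is mapped to values roughly
# in [-3.2, 3.2]; a 2-D (greyscale) input comes back stacked into three identical bands.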
############################################################
############################################################
# load model
filepath = './saved_model'
model = tf.keras.models.load_model(filepath, compile=True)
#segmentation
def segment(input_img, dims=(1024, 1024)):
    nrows = input_img.shape[0]
    ncols = input_img.shape[1]
img = standardize(input_img)
img = resize(img, dims, preserve_range=True, clip=True)
img = np.expand_dims(img,axis=0)
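    # img now has shape (1, dims[0], dims[1], 3): a single standardized image, ready for model.predict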
est_label = model.predict(img)
    # Test-Time Augmentation: predict on up-down, left-right, and doubly flipped copies
    # (flips act on the spatial axes of the batched array), then flip each prediction back
    est_label2 = np.flip(model.predict(np.flip(img, axis=1), batch_size=1), axis=1)
    est_label3 = np.flip(model.predict(np.flip(img, axis=2), batch_size=1), axis=2)
    est_label4 = np.flip(np.flip(model.predict(np.flip(np.flip(img, axis=1), axis=2), batch_size=1), axis=2), axis=1)
    # soft voting: average the four sets of softmax scores
    est_label = (est_label + est_label2 + est_label3 + est_label4) / 4
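    # Averaging the original and flipped-back predictions reduces orientation-dependent noise
    # in the softmax scores before they are resized back to the input size and thresholded.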
    pred = np.squeeze(est_label, axis=0)
    pred = resize(pred, (nrows, ncols), preserve_range=True, clip=True)
    # Otsu threshold on the coin-class softmax, lowered by a small bias
    bias = 0.1
    thres_coin = threshold_otsu(pred[:, :, 1]) - bias
print("Coin threshold: %f" % (thres_coin))
mask = (pred[:,:,1]<=thres_coin).astype('uint8')
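    # mask == 1 where the coin-class softmax (channel 1) falls below the threshold, i.e. background;
    # coin pixels are 0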
imsave("greyscale.png", mask*255)
class_label_colormap = [
"#3366CC",
"#DC3912",
"#FF9900",
]
    # keep only the first two colors, one per class
    class_label_colormap = class_label_colormap[:2]
color_label = label_to_colors(
mask,
input_img[:, :, 0] == 0,
alpha=128,
colormap=class_label_colormap,
color_class_offset=0,
do_alpha=False,
)
imsave("color.png", color_label)
#overlay plot
plt.clf()
plt.imshow(input_img,cmap='gray')
plt.imshow(color_label, alpha=0.4)
plt.axis("off")
plt.margins(x=0, y=0)
############################################################
dst = 1-mask.squeeze()
points = np.array(np.nonzero(dst)).T
points = np.column_stack((points[:,1], points[:,0]))
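    # coin pixel coordinates as (x, y) = (column, row) pairs, the ordering expected by the
    # skimage geometric model estimators below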
# print("Fitting ellipse to coin to compute diameter ....")
# model_robust, inliers = ransac(points, EllipseModel, min_samples=100,residual_threshold=2, max_trials=3)
# r=np.max([model_robust.params[2] , model_robust.params[3]])
# x=model_robust.params[0]
# y=model_robust.params[1]
# a_over_b = model_robust.params[2] / model_robust.params[3] ##a/b
print("Fitting circle to coin to compute diameter ....")
    model_robust, inliers = ransac(points, CircleModel, min_samples=100, residual_threshold=2, max_trials=100)
    r = model_robust.params[2]
    x = model_robust.params[0]
    y = model_robust.params[1]
    print('diameter of coin = %f pixels' % (r * 2))
    # a US quarter dollar is 24.26 mm in diameter, so mm/pixel = 24.26 / (diameter in pixels)
    print('image scaling (assuming quarter dollar) = %f mm/pixel' % (24.26 / (r * 2)))
    plt.plot(x, y, 'ko')
    # draw the fitted circle on the overlay
    theta = np.linspace(0, 2 * np.pi, 180)
    plt.plot(x + r * np.cos(theta), y + r * np.sin(theta), 'm')
    plt.savefig("overlay.png", dpi=300, bbox_inches="tight")
    return 'diameter of coin = %f pixels' % (r * 2), 'image scaling (assuming quarter dollar) = %f mm/pixel' % (24.26 / (r * 2)), color_label, plt, "greyscale.png", "color.png", "overlay.png"
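# segment() can also be exercised outside Gradio, e.g.
#   from skimage.io import imread
#   diameter_txt, scaling_txt, *_ = segment(imread("examples/IMG_20210922_170908944.jpg"))
# provided the ./saved_model weights and example images are present.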
title = "Find and measure coins in images of sand!"
description = "This model demonstration segments beach sediment imagery into two classes: a) background, and b) coin, then measures the coin to estimate image scale. Upload one image at a time; the label imagery can then be downloaded. This model is part of the Doodleverse: https://github.com/Doodleverse"
examples = [
['examples/IMG_20210922_170908944.jpg'],
['examples/20210208_172834.jpg'],
['examples/20220101_165359.jpg']
]
inp = gr.Image()
out1 = gr.Image(type='numpy')
out2 = gr.Plot(type='matplotlib')
out3 = gr.File()
out4 = gr.File()
out5 = gr.File()
Segapp = gr.Interface(segment, inp, ["text", "text", out1, out2, out3, out4, out5], title = title, description = description, examples=examples, theme="grass")
#, allow_flagging='manual', flagging_options=["bad", "ok", "good", "perfect"], flagging_dir="flagged")
Segapp.launch(enable_queue=True)