# Written by Dr Daniel Buscombe, Marda Science LLC
#
# MIT License
#
# Copyright (c) 2022, Marda Science LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# from imageio import imread
import pywt
# from tqdm import tqdm
from skimage.restoration import denoise_wavelet, estimate_sigma
from functools import partial
# rescale_sigma=True required to silence deprecation warnings
_denoise_wavelet = partial(denoise_wavelet, rescale_sigma=True)
import numpy as np
import scipy.stats as stats
from glob import glob
def rescale(dat, mn, mx):
    """
    Linearly rescale the values of `dat` onto the interval [mn, mx].

    Parameters
    ----------
    dat : ndarray
        Input array, any shape.
    mn, mx : float
        Target minimum and maximum of the output.

    Returns
    -------
    ndarray
        Same shape as `dat`, with its minimum mapped to `mn` and its
        maximum mapped to `mx`. A constant input maps everywhere to `mn`.
    """
    dat = np.asarray(dat)
    # ndarray.min()/max() instead of the builtin min/max over a flattened
    # copy: no Python-level loop and no temporary copy of the data
    m = dat.min()
    M = dat.max()
    if M == m:
        # constant input previously caused a divide-by-zero (nan/inf output)
        return np.full(dat.shape, mn, dtype=float)
    return (mx - mn) * (dat - m) / (M - m) + mn
##====================================
def standardize(img):
    """
    Standardise an image with an adjusted standard deviation, then map
    the result onto the interval [0, 1].
    """
    arr = np.array(img)
    npix = np.shape(arr)[0] * np.shape(arr)[1]
    # adjusted standard deviation: never divide by less than 1/sqrt(N),
    # which protects near-constant images from a blow-up
    spread = np.maximum(np.std(arr), 1.0 / np.sqrt(npix))
    arr = (arr - np.mean(arr)) / spread
    # min-max normalisation onto [0, 1] (inlined rescale(arr, 0, 1))
    lo, hi = arr.min(), arr.max()
    return (arr - lo) / (hi - lo)
# =========================================================
# =========================================================
def dgs(input_img, resolution=1, maxscale=4, verbose=1, x=-0.5):
    """
    Estimate the grain-size distribution of an image of sediment /
    granular material using the continuous wavelet transform (Morlet).

    Parameters
    ----------
    input_img : ndarray
        Image of sediment; RGB input is converted to greyscale, and any
        bands beyond the first three are discarded.
    resolution : float, optional
        Spatial resolution of the image in mm/pixel (default 1).
    maxscale : int, optional
        Initial maximum-scale divisor for the CWT (default 4); it is
        re-estimated internally from a first-pass mean grain size.
    verbose : int, optional
        1 to print the banner, progress and summary statistics (default 1).
    x : float, optional
        Initial area-to-volume conversion exponent (default -0.5); it is
        re-estimated internally from a first-pass mean grain size.

    Returns
    -------
    dict
        Keys: 'mean grain size', 'grain size sorting',
        'grain size skewness', 'grain size kurtosis', 'percentiles',
        'percentile_values', 'grain size frequencies', 'grain size bins'.
    """

    def _cwt_power_spectrum(img, n_rows, scale_div):
        """Mean normalised Morlet-CWT power spectrum over n_rows rows of img.

        Returns (p, period, peaks): the normalised mean spectrum, the
        periods (in pixels) it is defined on, and each row's peak period.
        """
        h, w = img.shape
        spectra = []
        peaks = []
        for k in np.linspace(1, h - 1, n_rows):
            cfs, frequencies = pywt.cwt(img[int(k), :],
                                        np.arange(3, np.maximum(h, w) / scale_div, 1),
                                        'morl', .5)
            period = 1. / frequencies
            power = (abs(cfs)) ** 2
            # mean power per scale, normalised by period^2
            power = np.mean(np.abs(power), axis=1) / (period ** 2)
            spectra.append(power)
            peaks.append(period[np.argmax(power)])
        p = np.mean(np.vstack(spectra), axis=0)
        p = np.array(p / np.sum(p))
        return p, period, peaks

    # banner — now actually gated by `verbose` (the guard was commented out)
    if verbose == 1:
        print("===========================================")
        print("======DIGITAL GRAIN SIZE: WAVELET==========")
        print("===========================================")
        print("=CALCULATE GRAIN SIZE-DISTRIBUTION FROM AN=")
        print("====IMAGE OF SEDIMENT/GRANULAR MATERIAL====")
        print("===========================================")
        print("======A PROGRAM BY DANIEL BUSCOMBE=========")
        print("====MARDASCIENCE, FLAGSTAFF, ARIZONA=======")
        print("========REVISION 4.2, APR 2022===========")
        print("===========================================")

    # ======= stage 1: read and grey-scale the image ==========
    if verbose == 1:
        print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
        print('Processing image ')
    im = np.squeeze(input_img)  # squeeze singleton dimensions
    if len(np.shape(im)) > 3:
        im = im[:, :, :3]  # only keep the first 3 bands
    if len(np.shape(im)) == 3:  # if rgb, convert to grey (ITU-R 601 luma weights)
        im = (0.299 * im[:, :, 0] + 0.5870 * im[:, :, 1] + 0.114 * im[:, :, 2]).astype('uint8')
    nx, ny = np.shape(im)
    if nx > ny:
        # transpose so rows run along the longer image dimension
        im = im.T
    im = standardize(im)

    # ======= stage 2: optional wavelet denoising ==========
    apply_filter = False  # renamed: `filter` shadowed the builtin
    if apply_filter:
        # NOTE(review): `multichannel` is deprecated in recent scikit-image
        # (use channel_axis=None) — left as-is because this branch is disabled.
        sigma_est = estimate_sigma(im, multichannel=False, average_sigmas=True)
        region = denoise_wavelet(im, multichannel=False, rescale_sigma=True,
                                 method='VisuShrink', mode='soft', sigma=sigma_est)
    else:
        region = im.copy()
    original = rescale(region, 0, 255)

    # ======= stage 3: first CWT pass to choose x and maxscale ==========
    p, period, _ = _cwt_power_spectrum(original, 40, maxscale)
    # real scales obtained by multiplying by resolution (mm/pixel)
    scales = np.array(period) * resolution
    mean_scale = np.sum(p * scales)
    if verbose == 1:
        print(mean_scale)
    # choose the exponent and scale divisor from the first-pass mean size;
    # boundaries are inclusive (exactly 80/60/40/20 previously matched no
    # branch, silently leaving x and maxscale at their incoming values)
    if mean_scale >= 80:
        x = 1
        maxscale = 4
    elif mean_scale >= 60:
        x = 0.75
        maxscale = 8
    elif mean_scale >= 40:
        x = 0.5
        maxscale = 12
    elif mean_scale >= 20:
        x = -0.5
        maxscale = 16
    else:
        x = -1
        maxscale = 20
    if verbose == 1:
        print("x is {}".format(x))
        print("maxscale is {}".format(maxscale))

    # ======= stage 4: refined CWT pass ==========
    p, period, peaks = _cwt_power_spectrum(original, 100, maxscale)
    scales = np.array(period) * resolution
    # NOTE(review): `peaks` are periods in pixels while `scales` are in mm,
    # so the two only share units when resolution == 1 — kept as the original.
    srt = np.sqrt(np.sum(p * ((scales - np.mean(peaks)) ** 2)))
    # regularise the spectrum with a gaussian centred on the mean peak period
    p = p + stats.norm.pdf(scales, np.mean(peaks), srt / np.pi)
    p = p / np.sum(p)
    mnsz = np.sum(p * scales)
    srt = np.sqrt(np.sum(p * ((scales - mnsz) ** 2)))
    # truncate the distribution at mean + 3 standard deviations
    ind = np.where(scales < (mnsz + 3 * srt))[0]
    scales = scales[ind]
    p = p[ind]
    # area-by-number to volume-by-number conversion
    r_v = (p * scales ** x) / np.sum(p * scales ** x)  # volume-by-weight proportion

    # ======= stage 5: grain-size statistics ==========
    percentiles = [.05, .1, .16, .25, .3, .5, .75, .84, .9, .95]
    pcs = np.interp(percentiles, np.hstack((0, np.cumsum(r_v))), np.hstack((0, scales)))
    mnsz = np.sum(r_v * scales)
    srt = np.sqrt(np.sum(r_v * ((scales - mnsz) ** 2)))
    sk = np.sum(r_v * ((scales - mnsz) ** 3)) / (100 * srt ** 3)
    kurt = np.sum(r_v * ((scales - mnsz) ** 4)) / (100 * srt ** 4)
    if verbose == 1:
        print("d50 = " + str(pcs[4]))
        print("mean size = " + str(mnsz))
        print("stdev = " + str(srt))
        print("skewness = " + str(sk))
        print("kurtosis = " + str(kurt))

    # ======= stage 6: package results ==========
    return {'mean grain size': mnsz, 'grain size sorting': srt,
            'grain size skewness': sk, 'grain size kurtosis': kurt,
            'percentiles': percentiles, 'percentile_values': pcs,
            'grain size frequencies': r_v, 'grain size bins': scales}