# Written by Dr Daniel Buscombe, Marda Science LLC
# for the SandSnap Program
#
# MIT License
#
# Copyright (c) 2020-2021, Marda Science LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
##> Release v1.4 (Aug 2021)
###===================================================
# import libraries
from sedinet_utils import *
###===================================================
def conv_block2(inp, filters=32, bn=True, pool=True, drop=True):
    """
    Generate a SediNet convolutional block: a 3x3 depthwise-separable
    convolution, optionally followed by batch normalization, 2x2 max
    pooling, and dropout
    """
    # SeparableConv2D is used in place of a standard Conv2D to reduce the
    # parameter count; note that 'relu' may create dead neurons, so an
    # alternative activation could be substituted here
    _ = SeparableConv2D(filters=filters, kernel_size=3, activation='relu')(inp)
    if bn:
        _ = BatchNormalization()(_)
    if pool:
        _ = MaxPool2D()(_)
    if drop:
        _ = Dropout(0.2)(_)
    return _
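
# Usage sketch for conv_block2 (illustrative only; the input shape and
# filter counts below are hypothetical, not taken from the config):
#   x_in = Input(shape=(512, 512, 3))
#   x = conv_block2(x_in, filters=30, bn=False, pool=False, drop=False)
#   x = conv_block2(x, filters=60, bn=False, pool=True, drop=False)
#   Model(inputs=x_in, outputs=x).summary()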
###===================================================
def make_cat_sedinet(ID_MAP, dropout):
    """
    Create an implementation of SediNet for estimating
    sediment category
    """
    base = BASE_CAT  ## 30
    input_layer = Input(shape=(IM_HEIGHT, IM_WIDTH, 3))
    # four convolutional blocks with linearly increasing filter counts
    _ = conv_block2(input_layer, filters=base, bn=False, pool=False, drop=False)
    _ = conv_block2(_, filters=base*2, bn=False, pool=True, drop=False)
    _ = conv_block2(_, filters=base*3, bn=False, pool=True, drop=False)
    _ = conv_block2(_, filters=base*4, bn=False, pool=True, drop=False)
    bottleneck = GlobalMaxPool2D()(_)
    bottleneck = Dropout(dropout)(bottleneck)
    # dense head for class prediction
    _ = Dense(units=CAT_DENSE_UNITS, activation='relu')(bottleneck)  ## 128
    output = Dense(units=len(ID_MAP), activation='softmax', name='output')(_)
    model = Model(inputs=input_layer, outputs=[output])
    OPT = tf.keras.optimizers.Adam(learning_rate=MAX_LR)
    if CAT_LOSS == 'focal':
        model.compile(optimizer=OPT,
                      loss={'output': tfa.losses.SigmoidFocalCrossEntropy()},
                      metrics={'output': 'accuracy'})
    else:  ## e.g. 'categorical_crossentropy'
        model.compile(optimizer=OPT,
                      loss={'output': CAT_LOSS},
                      metrics={'output': 'accuracy'})
    print("==========================================")
    print('[INFORMATION] Model summary:')
    model.summary()
    return model
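
# Usage sketch for make_cat_sedinet (illustrative only): ID_MAP maps
# integer class indices to category labels; the categories shown are
# hypothetical, and IM_HEIGHT, IM_WIDTH, BASE_CAT, CAT_DENSE_UNITS,
# CAT_LOSS, and MAX_LR are read from the global config:
#   model = make_cat_sedinet(ID_MAP={0: 'sand', 1: 'gravel'}, dropout=0.5)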
###===================================================
def make_sedinet_siso_simo(vars, greyscale, dropout):
    """
    Create an implementation of SediNet for estimating
    sediment metrics on a continuous scale
    """
    base = BASE_CONT  ## 30; suggested range = 20 -- 40
    if greyscale:
        input_layer = Input(shape=(IM_HEIGHT, IM_WIDTH, 1))
    else:
        input_layer = Input(shape=(IM_HEIGHT, IM_WIDTH, 3))
    # five convolutional blocks with linearly increasing filter counts
    _ = conv_block2(input_layer, filters=base, bn=False, pool=False, drop=False)
    _ = conv_block2(_, filters=base*2, bn=False, pool=True, drop=False)
    _ = conv_block2(_, filters=base*3, bn=False, pool=True, drop=False)
    _ = conv_block2(_, filters=base*4, bn=False, pool=True, drop=False)
    _ = conv_block2(_, filters=base*5, bn=False, pool=True, drop=False)
    if not SHALLOW:
        # four additional blocks for the deeper model variant
        _ = conv_block2(_, filters=base*6, bn=False, pool=True, drop=False)
        _ = conv_block2(_, filters=base*7, bn=False, pool=True, drop=False)
        _ = conv_block2(_, filters=base*8, bn=False, pool=True, drop=False)
        _ = conv_block2(_, filters=base*9, bn=False, pool=True, drop=False)
    _ = BatchNormalization(axis=-1)(_)
    bottleneck = GlobalMaxPool2D()(_)
    bottleneck = Dropout(dropout)(bottleneck)
    units = CONT_DENSE_UNITS  ## suggested range 512 -- 1024
    _ = Dense(units=units, activation='relu')(bottleneck)
    # one linear output head per variable; predicting the full vector
    # directly, instead of one variable at a time, might work better
    outputs = []
    for var in vars:
        outputs.append(Dense(units=1, activation='linear', name=var + '_output')(_))
    # per-output losses and metrics, keyed by output layer name
    if CONT_LOSS == 'pinball':
        loss = {k + "_output": tfa.losses.PinballLoss(tau=.5) for k in vars}
    else:  ## 'mse'
        loss = {k + "_output": 'mse' for k in vars}
    metrics = {k + "_output": 'mae' for k in vars}
    OPT = tf.keras.optimizers.Adam(learning_rate=MAX_LR)
    model = Model(inputs=input_layer, outputs=outputs)
    model.compile(optimizer=OPT, loss=loss, metrics=metrics)
    # print("==========================================")
    # print('[INFORMATION] Model summary:')
    # model.summary()
    return model
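
# Usage sketch for make_sedinet_siso_simo (illustrative only): 'vars' is
# a list of continuous response variable names, each of which gets its
# own '<var>_output' head with a matching loss and 'mae' metric; the
# variable name below is hypothetical:
#   model = make_sedinet_siso_simo(vars=['P50'], greyscale=True, dropout=0.5)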