import numpy
import keras
import gradio
import matplotlib.pyplot
import matplotlib.colors
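
# Gradio demo: predicts plane-strain field components (exx, eyy, exy) for a
# hand-drawn microstructure with porosity defects, using a pretrained
# convolutional "image colorization" network (see the paper linked in the app).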

# Building the neural network
model1 = keras.models.Sequential()
model1.add(keras.layers.InputLayer(input_shape=(101, 636, 1)))
model1.add(keras.layers.Conv2D(4, (9, 9), activation='relu', padding='same', strides=1))
model1.add(keras.layers.Conv2D(4, (9, 9), activation='relu', padding='same'))
model1.add(keras.layers.Conv2D(8, (7, 7), activation='relu', padding='same', strides=1))
model1.add(keras.layers.Conv2D(8, (7, 7), activation='relu', padding='same'))
model1.add(keras.layers.Conv2D(16, (5, 5), activation='relu', padding='same'))
model1.add(keras.layers.Conv2D(16, (5, 5), activation='relu', padding='same', strides=1))
model1.add(keras.layers.Conv2D(16, (3, 3), activation='relu', padding='same'))
model1.add(keras.layers.Conv2D(16, (3, 3), activation='relu', padding='same', strides=1))
model1.add(keras.layers.Conv2D(16, (2, 2), activation='relu', padding='same'))
model1.add(keras.layers.Conv2D(16, (2, 2), activation='relu', padding='same', strides=1))
model1.add(keras.layers.UpSampling2D((1, 1)))
model1.add(keras.layers.Conv2D(16, (2, 2), activation='relu', padding='same'))
model1.add(keras.layers.UpSampling2D((1, 1)))
model1.add(keras.layers.Conv2D(8, (3, 3), activation='relu', padding='same'))
model1.add(keras.layers.UpSampling2D((1, 1)))
model1.add(keras.layers.Conv2D(4, (7, 7), activation='tanh', padding='same'))
model1.add(keras.layers.UpSampling2D((1, 1)))
model1.add(keras.layers.Conv2D(3, (9, 9), activation='tanh', padding='same'))
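# Every Conv2D above uses padding='same' with stride 1, and every UpSampling2D
# uses size (1, 1), so the spatial resolution stays 101 x 636 throughout; the
# final 3-channel tanh layer plays the role of the "color" layers, one channel
# per strain component.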

# Load the pretrained weights into the architecture (the .h5 file must sit in the same directory as this script)
model1.load_weights('modelV13_500trained_1.h5')

# Get the color map by name:
cm = matplotlib.pyplot.get_cmap('RdBu')

# Nearest-neighbour scaling of image `im` to (nR x nC)
def scale(im, nR, nC):
    nR0 = len(im)     # source number of rows
    nC0 = len(im[0])  # source number of columns
    return numpy.array([[im[int(nR0 * r / nR)][int(nC0 * c / nC)]
                         for c in range(nC)] for r in range(nR)])
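# Illustrative example of the nearest-neighbour behaviour above:
#   scale([[0, 1], [2, 3]], 4, 4)
#   -> [[0, 0, 1, 1],
#       [0, 0, 1, 1],
#       [2, 2, 3, 3],
#       [2, 2, 3, 3]]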


def predict(mask):
    # Fall back to an all-ones mask if nothing was drawn; otherwise resize the
    # canvas to the model's 101 x 636 input and binarize it to {0, 1}
    scaled_mask = numpy.ones((101, 636)) if mask is None else numpy.round(scale(mask, 101, 636)/255.0)
    print(scaled_mask)
    X = scaled_mask[numpy.newaxis, :, :, numpy.newaxis]
    v = model1.predict(X)
    # Symmetric normalization so zero strain maps to the centre of the diverging colormap
    measure = max(v.max(), -v.min())
    output = (v / measure)
    legend = "<h2>Strain</h2><table style=\"width:100%\"><tr>"
    for i in range(11):
        color = cm(i/10.0)[:3]
        value = -measure + i*2*measure/10
        print(sum(list(color)))
        hex = matplotlib.colors.to_hex(list(color))
        text_color = "black" if sum(list(color)) > 2.0 else "white"
        legend = legend + f"<td style=\"background-color: {hex}; color: {text_color}\">{value:+.2e}</td>"
    legend = legend + "</tr></table>"
    # Map each masked, normalized strain component onto the RdBu colormap
    return (
        cm((numpy.multiply(output[0, :, :, 0], scaled_mask) + 1.0) / 2.0),
        cm((numpy.multiply(output[0, :, :, 1], scaled_mask) + 1.0) / 2.0),
        cm((numpy.multiply(output[0, :, :, 2], scaled_mask) + 1.0) / 2.0),
        legend,
    )
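# Hypothetical standalone use (bypassing the UI), assuming the weights file above
# is available; `blank` here is an illustrative all-white 101 x 636 mask:
#   blank = numpy.full((101, 636), 255, dtype=numpy.uint8)
#   exx_img, eyy_img, exy_img, legend_html = predict(blank)
#   matplotlib.pyplot.imshow(exx_img)  # each image is an RGBA array of shape (101, 636, 4)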

with gradio.Blocks() as demo:

    with gradio.Accordion("✨ Read about the ML model here! ✨", open=False):
        with gradio.Row():
            with gradio.Column():
                gradio.Markdown("# Predicting elastic strain fields in defective microstructures using image colorization algorithms")
                gradio.HTML("Pranav Khanolkar, Penn State<br/>Christopher McComb, Carnegie Mellon University<br/>Saurabh Basu, Penn State")
                gradio.Markdown("_Abstract_: In this work, an image colorization algorithm based on convolutional neural networks is explored as an approach to predict tensile plane-strain field components of microstructures featuring porosity defects. For the same, microstructures featuring porosity of various shapes, sizes, area fractions and number densities were sampled on the gage section of ASTM-E8 sized numerical specimens whose tensile deformation was simulated in plane strain mode using commercial finite element analysis package Abaqus. Subsequently, the image colorization algorithm was trained by treating the microstructure featuring porosity defects as the gray scale image, and its strain field components as its color layers, analogous to the red-green-blue color components of traditional digital representations of images. Towards the same, various CNN frameworks were tested for optimization of its parameters, viz. number of layers, number of filters in each layer, stride, padding, and activation function. An optimized CNN framework is presented that is able to predict strain fields on randomly sampled microstructures with high accuracy at a fraction of the time that finite element analysis would take. Various cross-validation tests were performed to test the accuracy and robustness of the CNN in learning features of various microstructures. Results indicated that the CNN algorithm is extremely robust and can provide near-accurate strain fields in generic scenarios.")
            with gradio.Column():
                download = gradio.HTML("<a href=\"https://huggingface.co/spaces/cmudrc/microstructure-strain/resolve/main/1-s2.0-S0927025620305590-main.pdf\" style=\"width: 60%; display: block; margin: auto;\"><img src=\"https://huggingface.co/spaces/cmudrc/microstructure-strain/resolve/main/coverpage.png\"></a>")
    
    gradio.Markdown("It can be challenging to rapidly infer the stress and strain that are present in a material with a complex microstructure. This demo runs a rapid surrogate model to compute strain for the microstructure that you draw!")     

    mask = gradio.Image(image_mode="L", source="canvas", label="microstructure")

    btn = gradio.Button("Run!", variant="primary")
    exx = gradio.Image(label="ε-xx") 
    eyy = gradio.Image(label="ε-yy") 
    exy = gradio.Image(label="ε-xy")
    legend = gradio.HTML(label="", value="")

    # Run the prediction when the button is pressed or the canvas changes
    btn.click(fn=predict, inputs=[mask], outputs=[exx, eyy, exy, legend])
    mask.change(fn=predict, inputs=[mask], outputs=[exx, eyy, exy, legend])

demo.launch(debug=True)