import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Model
from tensorflow.keras.layers import (Conv2D, MaxPooling2D, Conv2DTranspose, Input,
                                     concatenate, BatchNormalization, Activation, Add)
from tensorflow.keras.activations import tanh, softplus  # softplus(x) = log(exp(x) + 1)
import numpy as np
import cv2
import gradio as gr
def normalize(arr):
  # Map pixel values from [0, 255] to [-1, 1].
  return 2 * (arr / 255) - 1

def denormalize(arr):
  # Map values from [-1, 1] back to [0, 255].
  return ((arr + 1) / 2) * 255

def mish(x):
  return x * tanh(softplus(x))
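# Note: Mish is a smooth, non-monotonic activation, x * tanh(ln(1 + e^x));
# it behaves like the identity for large positive inputs and tends to 0 for very negative inputs.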

def get_model(input_shape, is_bn):
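  # Builds a U-Net-style encoder-decoder made of residual convolution blocks.
  # `is_bn` toggles BatchNormalization inside each block.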

  def get_block_left(nfilters, conv_input, pool=True):
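    # Encoder residual block: a 1x1 projection shortcut plus two 3x3 convolutions,
    # merged with Add and activated with Mish (BatchNorm optional via `is_bn`).
    # When `pool` is True, also returns a 2x2 max-pooled copy for the next level.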
    res_i = Conv2D(filters=nfilters, kernel_size=1, use_bias=False, strides=1, padding='same')(conv_input)

    conv_i = Conv2D(filters=nfilters, kernel_size=3, strides=1, padding='same')(conv_input)
    if is_bn:
      conv_i = BatchNormalization()(conv_i)
    conv_i = Activation(mish)(conv_i)
    
    conv_i = Conv2D(filters=nfilters, kernel_size=3, strides=1, padding='same')(conv_i)
    res_conv_i = Add()([conv_i, res_i])
    
    if is_bn:
      res_conv_i = BatchNormalization()(res_conv_i)
    res_conv_i = Activation(mish)(res_conv_i)

    if not pool:
      return res_conv_i

    pool_i = MaxPooling2D(pool_size=(2, 2))(res_conv_i)
    return [res_conv_i, pool_i]


  def get_block_right(nfilters, conv_input, cross_conv):
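    # Decoder block: upsample by 2 with a transposed convolution, concatenate the
    # matching encoder feature map (skip connection), then apply the same
    # residual double-convolution pattern as the encoder blocks.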
    inv_i = Conv2DTranspose(filters=nfilters, kernel_size=3, strides=2, padding='same')(conv_input)
    cat_i = concatenate([inv_i, cross_conv])

    res_i = Conv2D(filters=nfilters, kernel_size=1, use_bias=False, strides=1, padding='same')(cat_i)

    conv_i = Conv2D(filters=nfilters, kernel_size=3, strides=1, padding='same')(cat_i)
    if is_bn:
      conv_i = BatchNormalization()(conv_i)
    conv_i = Activation(mish)(conv_i)

    conv_i = Conv2D(filters=nfilters, kernel_size=3, strides=1, padding='same')(conv_i)
    res_conv_i = Add()([conv_i, res_i])

    if is_bn:
      res_conv_i = BatchNormalization()(res_conv_i)
    res_conv_i = Activation(mish)(res_conv_i)
    return res_conv_i



  # Input: `input_shape` is (height, width, channels); here 3 channels (RGB).
  inp_1 = Input(input_shape)

  # Encoder: residual blocks with 2x2 max-pooling between levels.
  conv1, pool1 = get_block_left(16, inp_1)
  conv2, pool2 = get_block_left(32, pool1)
  conv3, pool3 = get_block_left(64, pool2)
  conv4, pool4 = get_block_left(128, pool3)
  conv5, pool5 = get_block_left(256, pool4)
  conv6 = get_block_left(512, pool5, False)

  # Decoder: upsample and merge with the matching encoder features.
  rconv1 = get_block_right(256, conv6, conv5)
  rconv2 = get_block_right(128, rconv1, conv4)
  rconv3 = get_block_right(64, rconv2, conv3)
  rconv4 = get_block_right(32, rconv3, conv2)
  rconv5 = get_block_right(16, rconv4, conv1)

  # Output: a 3-channel image in [-1, 1] via tanh.
  out_1 = Conv2D(filters=3, kernel_size=1, strides=1, padding='same')(rconv5)
  out_1 = Activation('tanh')(out_1)

  # Create and compile the model.
  model = Model(inputs=inp_1, outputs=out_1)
  model.compile(loss=keras.losses.mse, optimizer='adam')

  return model

def predict_one(x):
  # Resize to the network input size, map to [-1, 1], run a batch of one
  # image through the model, then convert the output back to an 8-bit image.
  x = cv2.resize(x, (256, 256))
  x = normalize(x)
  h = model.predict(np.array([x]))[0]
  h = denormalize(h)
  return np.uint8(h)
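# A minimal standalone usage sketch (runs only after the weights are loaded below;
# 'photo.jpg' is a hypothetical local image path):
#
#   img = cv2.imread('photo.jpg')                      # OpenCV loads BGR
#   img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)         # Gradio-style RGB input
#   styled = predict_one(img)                          # 256x256 RGB uint8 output
#   cv2.imwrite('styled.jpg', cv2.cvtColor(styled, cv2.COLOR_RGB2BGR))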


# Build the network and load the pretrained weights.
model = get_model((256, 256, 3), True)
model.load_weights('./resweights.h5')

# Create a simple web GUI with Gradio.
description = ("The project aims to develop a program that manipulates a digital image to adopt the appearance or visual style of another image. "
               "Our objective is to create artificial artwork from photographs, for example by transferring the look of famous paintings such as Van Gogh's to user-supplied photographs; this kind of application is called style transfer. "
               "However, well-known style-transfer algorithms need a long time to produce one image, sometimes around 30 minutes on an ordinary PC, so this project focuses on reproducing the image in seconds.")
gr.Interface(fn=predict_one, allow_flagging="never", inputs="image", outputs="image", description=description).launch()