MuhammadALELIWI committed on
Commit
c29c0e1
1 Parent(s): 70ecf29

modify app

Files changed (3)
  1. README.md +13 -0
  2. app.py +106 -0
  3. requirements.txt +4 -0
README.md ADDED
@@ -0,0 +1,13 @@
+ ---
+ title: NeuralStyleTransfer ResUNET
+ emoji: 🏢
+ colorFrom: yellow
+ colorTo: yellow
+ sdk: gradio
+ sdk_version: 2.9.4
+ app_file: app.py
+ pinned: false
+ license: mit
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
app.py ADDED
@@ -0,0 +1,106 @@
+ import tensorflow as tf
+ import keras
+ from keras.models import Model
+ import keras.losses
+ from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Conv2DTranspose, \
+     Input, concatenate, BatchNormalization, LeakyReLU, AveragePooling2D, Activation, Add
+ from tensorflow.keras.activations import tanh, softplus  # softplus(x) = log(exp(x) + 1)
+ import numpy as np
+ import cv2
+ import gradio as gr
+ def normalize(arr):  # map [0, 255] to [-1, 1]
+     return 2 * (arr / 255) - 1
+
+ def denormalize(arr):  # map [-1, 1] back to [0, 255]
+     return ((arr + 1) / 2) * 255
+
+ mish = lambda x: x * tanh(softplus(x))  # Mish activation
+
+ def get_model(input_shape, is_bn):
+
+     def get_block_left(nfilters, conv_input, pool=True):
+         res_i = Conv2D(filters=nfilters, kernel_size=1, use_bias=False, strides=1, padding='same')(conv_input)
+
+         conv_i = Conv2D(filters=nfilters, kernel_size=3, strides=1, padding='same')(conv_input)
+         if is_bn:
+             conv_i = BatchNormalization()(conv_i)
+         conv_i = Activation(mish)(conv_i)
+
+         conv_i = Conv2D(filters=nfilters, kernel_size=3, strides=1, padding='same')(conv_i)
+         res_conv_i = Add()([conv_i, res_i])
+
+         if is_bn:
+             res_conv_i = BatchNormalization()(res_conv_i)
+         res_conv_i = Activation(mish)(res_conv_i)
+
+         if not pool:
+             return res_conv_i
+
+         pool_i = MaxPooling2D(pool_size=(2, 2))(res_conv_i)
+         return [res_conv_i, pool_i]
+
+
+     def get_block_right(nfilters, conv_input, cross_conv):
+         inv_i = Conv2DTranspose(filters=nfilters, kernel_size=3, strides=2, padding='same')(conv_input)
+         cat_i = concatenate([inv_i, cross_conv])
+
+         res_i = Conv2D(filters=nfilters, kernel_size=1, use_bias=False, strides=1, padding='same')(cat_i)
+
+         conv_i = Conv2D(filters=nfilters, kernel_size=3, strides=1, padding='same')(cat_i)
+         if is_bn:
+             conv_i = BatchNormalization()(conv_i)
+         conv_i = Activation(mish)(conv_i)
+
+         conv_i = Conv2D(filters=nfilters, kernel_size=3, strides=1, padding='same')(conv_i)
+         res_conv_i = Add()([conv_i, res_i])
+
+         if is_bn:
+             res_conv_i = BatchNormalization()(res_conv_i)
+         res_conv_i = Activation(mish)(res_conv_i)
+         return res_conv_i
+
+
+
+     # input
+     inp_1 = Input(input_shape)  # 3 channels: Red, Green and Blue
+
+     # first part: encoder (contracting path)
+     conv1, pool1 = get_block_left(16, inp_1)
+     conv2, pool2 = get_block_left(32, pool1)
+     conv3, pool3 = get_block_left(64, pool2)
+     conv4, pool4 = get_block_left(128, pool3)
+     conv5, pool5 = get_block_left(256, pool4)
+     conv6 = get_block_left(512, pool5, False)
+     # second part: decoder (expanding path)
+     rconv1 = get_block_right(256, conv6, conv5)
+     rconv2 = get_block_right(128, rconv1, conv4)
+     rconv3 = get_block_right(64, rconv2, conv3)
+     rconv4 = get_block_right(32, rconv3, conv2)
+     rconv5 = get_block_right(16, rconv4, conv1)
+
+
+     # output
+     out_1 = Conv2D(filters=3, kernel_size=1, strides=1, padding='same')(rconv5)
+     out_1 = Activation('tanh')(out_1)
+
+     # create model
+     model = Model(inputs=inp_1, outputs=out_1)
+
+     # build
+     model.compile(loss=keras.losses.mse, optimizer='adam')
+
+     return model
+
+ def predict_one(x):
+     x = cv2.resize(x, (256, 256))
+     x = normalize(x)
+     h = model.predict(np.array([x]))[0]
+     h = denormalize(h)
+     return np.uint8(h)
+
+
+ model = get_model((256, 256, 3), True)
+ model.load_weights('./resweights.h5')
+
+ # One line of Python creates a simple GUI
+ gr.Interface(fn=predict_one, allow_flagging="never", inputs="image", outputs="image", description="The project aims at developing a software program capable of manipulating digital images to adopt the appearance or visual style of another image. Our objective is to create artificial artwork from photographs, for example by transferring the appearance of famous paintings, such as Van Gogh's, to user-supplied photographs. This type of application is called style transfer. However, well-known style-transfer algorithms need a lot of time to produce a single image; a run can take around 30 minutes on an ordinary PC. This project therefore focuses on reproducing the image in seconds.").launch()
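As an aside, a minimal smoke test for the pipeline added above might look like the sketch below. It assumes it runs in the same session as the definitions in app.py (so get_model and predict_one are in scope) and that resweights.h5 is present; the random array and the output filename are only stand-ins for a real photograph and a real destination.

import numpy as np
import cv2

# Build the network and load the trained weights, mirroring app.py.
model = get_model((256, 256, 3), True)
model.load_weights('./resweights.h5')

# A random uint8 array stands in for a user photo of arbitrary size.
fake_photo = np.random.randint(0, 256, (512, 384, 3), dtype=np.uint8)
stylized = predict_one(fake_photo)  # resizes to 256x256, normalizes, predicts, denormalizes
assert stylized.shape == (256, 256, 3) and stylized.dtype == np.uint8
cv2.imwrite('stylized_test.png', stylized)  # inspect the output by eye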
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ tensorflow==2.8.0
+ keras==2.8.0
+ numpy==1.22.3
+ opencv-python==4.5.5.64
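If the pins above ever drift, a quick sanity check is to print what actually resolved; a sketch, where the expected values are simply the versions pinned above:

# Print the installed versions and compare them against the pins above.
import tensorflow as tf
import keras
import numpy
import cv2

print(tf.__version__)     # expected: 2.8.0
print(keras.__version__)  # expected: 2.8.0
print(numpy.__version__)  # expected: 1.22.3
print(cv2.__version__)    # expected: 4.5.5 (from opencv-python 4.5.5.64)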