geninhu committed on
Commit
084afeb
1 Parent(s): 0ab0244

Add application file

.gitattributes CHANGED
@@ -25,3 +25,12 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zstandard filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ examples/example_1.jpg filter=lfs diff=lfs merge=lfs -text
+ examples/example_2.jpg filter=lfs diff=lfs merge=lfs -text
+ examples/example_3.jpg filter=lfs diff=lfs merge=lfs -text
+ examples/example_4.jpg filter=lfs diff=lfs merge=lfs -text
+ examples/example_5.jpg filter=lfs diff=lfs merge=lfs -text
+ examples/example_6.jpg filter=lfs diff=lfs merge=lfs -text
+ examples/example_7.jpg filter=lfs diff=lfs merge=lfs -text
+ examples/example_8.jpg filter=lfs diff=lfs merge=lfs -text
+ examples/example_9.jpg filter=lfs diff=lfs merge=lfs -text
app.py ADDED
@@ -0,0 +1,44 @@
+ import math
+ import numpy as np
+ import pandas as pd
+
+ import gradio as gr
+ from huggingface_hub import from_pretrained_fastai
+ from fastai.vision.all import *
+ from torchvision.models import vgg19, vgg16
+ from utils import *  # note: a bare star-import does not bring in underscore-prefixed names such as _get_layers unless utils.py defines __all__
+
+ pascal_source = '.'
+ EXAMPLES_PATH = Path('/content/examples')  # Colab path; the example images added in this commit live in ./examples
+ repo_id = "hugginglearners/fastai-style-transfer"
+
+ _vgg_config = {
+     'vgg16' : [1, 11, 18, 25, 20],
+     'vgg19' : [1, 6, 11, 20, 29, 22]
+ }
+
+ feat_net, layers = _get_layers('vgg19', True)
+ hooks = hook_outputs(layers, detach=False)
+
+ learner = from_pretrained_fastai(repo_id)
+
+ def infer(img):
+     pred = learner.predict(img)
+     image = pred[0].cpu().numpy()
+     image = image.transpose((1, 2, 0))  # CHW -> HWC for matplotlib
+     plt.imshow(image)
+     return plt.gcf()  # alternatively: pred[0].show()
+
+ # get the input
+ inputs = gr.inputs.Image(shape=(192, 192))
+
+ # the app outputs the stylised image as a matplotlib figure
+ output = gr.Plot()
+ # it's good practice to pass examples, a description and a title to guide users
+ title = 'Style transfer'
+ description = ''
+ article = "Author: <a href=\"https://huggingface.co/geninhu\">Nhu Hoang</a>. "
+ examples = [f'{EXAMPLES_PATH}/{f.name}' for f in EXAMPLES_PATH.iterdir()]
+
+ gr.Interface(infer, inputs, output, examples=examples, allow_flagging='never',
+              title=title, description=description, article=article, live=False).launch(enable_queue=True, debug=False, inbrowser=True)
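For reference, a minimal way to exercise the same prediction path outside Gradio is sketched below. It is not part of the commit and rests on two assumptions: the repository is cloned locally, so utils.py and the examples/ folder added in this commit sit next to the script (rather than at the /content/examples path hard-coded above), and the Hub checkpoint unpickles cleanly once the classes from utils.py are importable.

# Hypothetical local smoke test; not part of this commit.
from pathlib import Path

from huggingface_hub import from_pretrained_fastai
from fastai.vision.all import PILImage
from utils import *  # the exported learner references FeatureLoss/TransformerNet defined there

learner = from_pretrained_fastai("hugginglearners/fastai-style-transfer")

img = PILImage.create(Path("examples") / "example_1.jpg")    # one of the LFS images added in this commit
pred = learner.predict(img)                                  # the same call app.py's infer() makes
styled = pred[0].cpu().numpy().transpose((1, 2, 0))          # CHW tensor -> HWC array for display
print(styled.shape)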
examples/example_1.jpg ADDED

Git LFS Details

  • SHA256: 2ce64fba7e31e909f598320b6f675102c8dd522afe03232a65931b9c8aaae00a
  • Pointer size: 130 Bytes
  • Size of remote file: 82.4 kB
examples/example_2.jpg ADDED

Git LFS Details

  • SHA256: 3d6c5696736118b5f50a4b6f22d252a25ba1bab6c19f9ac8935e27cd7ebf5183
  • Pointer size: 130 Bytes
  • Size of remote file: 68.6 kB
examples/example_3.jpg ADDED

Git LFS Details

  • SHA256: b93c1343f71f3d9acabe139c6254a5bda430900e2264741cb0d15b49b79cd888
  • Pointer size: 131 Bytes
  • Size of remote file: 107 kB
examples/example_4.jpg ADDED

Git LFS Details

  • SHA256: 8fad8066f6d7da36e84d56561ea55ce1bf7e414dc95570e1e5efef0d27934e2c
  • Pointer size: 131 Bytes
  • Size of remote file: 101 kB
examples/example_5.jpg ADDED

Git LFS Details

  • SHA256: ad57837f9af85dde7c922292d6b06038540929f807ddff04db7d4ef9cda39c38
  • Pointer size: 131 Bytes
  • Size of remote file: 105 kB
examples/example_6.jpg ADDED

Git LFS Details

  • SHA256: d533881bae407cb90ce12a53f924150f9eaf6231aa56b38036b46117fbf85bde
  • Pointer size: 131 Bytes
  • Size of remote file: 172 kB
examples/example_7.jpg ADDED

Git LFS Details

  • SHA256: ed3b1e65a3a1dab30f176436046370a3055198fbf6d54aa7e4741e7c7e63cd6d
  • Pointer size: 131 Bytes
  • Size of remote file: 148 kB
examples/example_8.jpg ADDED

Git LFS Details

  • SHA256: 3d47bfd8ecc704eda1a0a9e12482e7f1b11cf837fe5cf867f5059fc5ff0eaf20
  • Pointer size: 131 Bytes
  • Size of remote file: 141 kB
examples/example_9.jpg ADDED

Git LFS Details

  • SHA256: b44c51f82532921c5a68a4bfeb37390449054883aca31f3a747a8c7f0541dce0
  • Pointer size: 130 Bytes
  • Size of remote file: 86.5 kB
utils.py ADDED
@@ -0,0 +1,153 @@
+ import math
+ import numpy as np
+ import pandas as pd
+
+ import gradio as gr
+ from huggingface_hub import from_pretrained_fastai
+ from fastai.vision.all import *
+ from torchvision.models import vgg19, vgg16
+
+ pascal_source = '.'
+ EXAMPLES_PATH = Path('/content/examples')
+ repo_id = "hugginglearners/fastai-style-transfer"
+
+ def get_stl_fs(fs): return fs[:-1]
+
+ def style_loss(inp:Tensor, out_feat:Tensor):
+     "Calculate style loss, assumes we have `im_grams`"
+     # Get batch size
+     bs = inp[0].shape[0]
+     loss = []
+     # For every item in our inputs
+     for y, f in zip(*map(get_stl_fs, [im_grams, inp])):
+         # Calculate MSE
+         loss.append(F.mse_loss(y.repeat(bs, 1, 1), gram(f)))  # gram() is not defined in this file; see the sketch after this listing
+     # Scale their sum by 3e5
+     return 3e5 * sum(loss)
+
+ class FeatureLoss(Module):
+     "Combines two losses and features into a usable loss function"
+     def __init__(self, feats, style_loss, act_loss, hooks, feat_net):
+         store_attr()
+         self.hooks = hooks
+         self.feat_net = feat_net
+         self.reset_metrics()
+
+     def forward(self, pred, targ):
+         # First get the features of our prediction and target
+         pred_feat, targ_feat = self.feats(self.feat_net, self.hooks, pred), self.feats(self.feat_net, self.hooks, targ)
+         # Calculate style and activation loss
+         style_loss = self.style_loss(pred_feat, targ_feat)
+         act_loss = self.act_loss(pred_feat, targ_feat)
+         # Store the loss
+         self._add_loss(style_loss, act_loss)
+         # Return the sum
+         return style_loss + act_loss
+
+     def reset_metrics(self):
+         # Generates a blank metric
+         self.metrics = dict(style = [], content = [])
+
+     def _add_loss(self, style_loss, act_loss):
+         # Add to our metrics
+         self.metrics['style'].append(style_loss)
+         self.metrics['content'].append(act_loss)
+
+ def act_loss(inp:Tensor, targ:Tensor):
+     "Calculate the MSE loss of the activation layers"
+     return F.mse_loss(inp[-1], targ[-1])
+
+ class ReflectionLayer(Module):
+     "A series of Reflection Padding followed by a ConvLayer"
+     def __init__(self, in_channels, out_channels, ks=3, stride=2):
+         reflection_padding = ks // 2
+         self.reflection_pad = nn.ReflectionPad2d(reflection_padding)
+         self.conv2d = nn.Conv2d(in_channels, out_channels, ks, stride)
+
+     def forward(self, x):
+         out = self.reflection_pad(x)
+         out = self.conv2d(out)
+         return out
+
+ class ResidualBlock(Module):
+     "Two reflection layers and an added activation function with residual"
+     def __init__(self, channels):
+         self.conv1 = ReflectionLayer(channels, channels, ks=3, stride=1)
+         self.in1 = nn.InstanceNorm2d(channels, affine=True)
+         self.conv2 = ReflectionLayer(channels, channels, ks=3, stride=1)
+         self.in2 = nn.InstanceNorm2d(channels, affine=True)
+         self.relu = nn.ReLU()
+
+     def forward(self, x):
+         residual = x
+         out = self.relu(self.in1(self.conv1(x)))
+         out = self.in2(self.conv2(out))
+         out = out + residual
+         return out
+
+ class UpsampleConvLayer(Module):
+     "Upsample with a ReflectionLayer"
+     def __init__(self, in_channels, out_channels, ks=3, stride=1, upsample=None):
+         self.upsample = upsample
+         reflection_padding = ks // 2
+         self.reflection_pad = nn.ReflectionPad2d(reflection_padding)
+         self.conv2d = nn.Conv2d(in_channels, out_channels, ks, stride)
+
+     def forward(self, x):
+         x_in = x
+         if self.upsample:
+             x_in = torch.nn.functional.interpolate(x_in, mode='nearest', scale_factor=self.upsample)
+         out = self.reflection_pad(x_in)
+         out = self.conv2d(out)
+         return out
+
+ class TransformerNet(Module):
+     "A simple network for style transfer"
+     def __init__(self):
+         # Initial convolution layers
+         self.conv1 = ReflectionLayer(3, 32, ks=9, stride=1)
+         self.in1 = nn.InstanceNorm2d(32, affine=True)
+         self.conv2 = ReflectionLayer(32, 64, ks=3, stride=2)
+         self.in2 = nn.InstanceNorm2d(64, affine=True)
+         self.conv3 = ReflectionLayer(64, 128, ks=3, stride=2)
+         self.in3 = nn.InstanceNorm2d(128, affine=True)
+         # Residual layers
+         self.res1 = ResidualBlock(128)
+         self.res2 = ResidualBlock(128)
+         self.res3 = ResidualBlock(128)
+         self.res4 = ResidualBlock(128)
+         self.res5 = ResidualBlock(128)
+         # Upsampling layers
+         self.deconv1 = UpsampleConvLayer(128, 64, ks=3, stride=1, upsample=2)
+         self.in4 = nn.InstanceNorm2d(64, affine=True)
+         self.deconv2 = UpsampleConvLayer(64, 32, ks=3, stride=1, upsample=2)
+         self.in5 = nn.InstanceNorm2d(32, affine=True)
+         self.deconv3 = ReflectionLayer(32, 3, ks=9, stride=1)
+         # Non-linearities
+         self.relu = nn.ReLU()
+
+     def forward(self, X):
+         y = self.relu(self.in1(self.conv1(X)))
+         y = self.relu(self.in2(self.conv2(y)))
+         y = self.relu(self.in3(self.conv3(y)))
+         y = self.res1(y)
+         y = self.res2(y)
+         y = self.res3(y)
+         y = self.res4(y)
+         y = self.res5(y)
+         y = self.relu(self.in4(self.deconv1(y)))
+         y = self.relu(self.in5(self.deconv2(y)))
+         y = self.deconv3(y)
+         return y
+
+ def _inner(feat_net, hooks, x):
+     feat_net(x)
+     return hooks.stored
+
+ def _get_layers(arch:str, pretrained=True):
+     "Get the layers and arch for a VGG model (only vgg16 and vgg19 are supported)"
+     feat_net = vgg19(pretrained=pretrained).cuda() if arch.find('9') > 1 else vgg16(pretrained=pretrained).cuda()
+     config = _vgg_config.get(arch)  # _vgg_config is defined in app.py, not in this file
+     features = feat_net.features.cuda().eval()
+     for p in features.parameters(): p.requires_grad=False
+     return feat_net, [features[i] for i in config]
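The style loss above compares Gram matrices of VGG feature maps, but the `gram` helper (and the precomputed `im_grams` it is compared against) is not included in this commit. A minimal sketch of what such a helper conventionally looks like, offered only as an assumption about the missing piece:

import torch

def gram(x: torch.Tensor) -> torch.Tensor:
    "Gram matrix for a batch of feature maps, normalised by the number of elements per map."
    n, c, h, w = x.shape
    feats = x.reshape(n, c, h * w)                         # flatten the spatial dimensions
    return feats @ feats.transpose(1, 2) / (c * h * w)     # (n, c, c) channel inner products

With a helper like this, the training-time loss would presumably have been assembled as FeatureLoss(_inner, style_loss, act_loss, hooks, feat_net), matching the constructor signature above; only the exported learner on the Hub, not the training script, is part of this repository.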