Add application files and saved generator models.
- G_XtoY.pth +3 -0
- G_YtoX.pth +3 -0
- app.py +49 -0
- archs.py +33 -0
- layers.py +49 -0
G_XtoY.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ee019829cf291aec2352127f8876130196fa60bcaf284ab7e788c96d1191228a
+size 2270536
G_YtoX.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fe687fc70da37688ce07a43d4f5c582e33e59001214c2b361213bce0ecc9b6c5
+size 2270536
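Both .pth files are stored through Git LFS: the repository itself holds only these three-line pointer stubs (spec version, sha256 oid, byte size), while the ~2.3 MB checkpoints live in LFS storage. If a clone fetches only the pointers, torch.load in app.py will fail on the text stub. A minimal guard against that, where is_lfs_pointer is a hypothetical helper and not part of this commit:

# Hypothetical guard: detect an unfetched LFS pointer before torch.load.
def is_lfs_pointer(path):
    with open(path, 'rb') as f:
        return f.read(24).startswith(b'version https://git-lfs')

for ckpt in ('G_XtoY.pth', 'G_YtoX.pth'):
    if is_lfs_pointer(ckpt):
        raise RuntimeError(f'{ckpt} is an LFS pointer; run `git lfs pull` first')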
app.py
ADDED
@@ -0,0 +1,49 @@
+import gradio as gr
+import torch
+from torchvision import transforms
+from archs import CycleGenerator
+import warnings
+
+warnings.filterwarnings("ignore")
+
+# instantiate generators
+G_XtoY = CycleGenerator(conv_dim=64)  # Apple -> Windows
+G_YtoX = CycleGenerator(conv_dim=64)  # Windows -> Apple
+
+# load weights on CPU (free Hugging Face Spaces do not provide a GPU)
+device = torch.device('cpu')
+G_XtoY.load_state_dict(torch.load('G_XtoY.pth', map_location=device)); G_XtoY.eval()
+G_YtoX.load_state_dict(torch.load('G_YtoX.pth', map_location=device)); G_YtoX.eval()
+
+def generate(input_image, radio):
+    transform = transforms.Compose([
+        transforms.Resize((32, 32)),  # force exact 32x32; Resize(32) only fixes the shorter side
+        transforms.ToTensor(),
+        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
+    ])
+    input_image = transform(input_image).unsqueeze(0)
+    with torch.no_grad():
+        if radio == 'Apple to Windows':
+            out = G_XtoY(input_image).squeeze().numpy()
+        else:
+            out = G_YtoX(input_image).squeeze().numpy()
+    return (out.transpose(1, 2, 0) + 1) / 2  # map [-1, 1] back to [0, 1] for display
+
+
+input_image = gr.inputs.Image(source="upload", type="pil", label="Input Image")
+radio = gr.inputs.Radio(
+    choices=['Apple to Windows', 'Windows to Apple'],
+    label="Choose Conversion"
+)
+
+output_image = gr.outputs.Image(type="numpy", label="Converted Image")
+
+iface = gr.Interface(
+    generate,
+    [input_image, radio],
+    output_image,
+    title="Apple/Windows Style Emoji Conversion Using Cycle-GAN",
+    article="By: Arian Tashakkor"
+)
+
+iface.launch()
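The translation path can be smoke-tested without launching the Gradio UI. A minimal sketch mirroring app.py's preprocessing end to end; the blank 32x32 stand-in image is just a placeholder for a real emoji, and it assumes the checkpoints have already been fetched:

import torch
from PIL import Image
from torchvision import transforms
from archs import CycleGenerator

# rebuild one generator exactly as app.py does
G = CycleGenerator(conv_dim=64)
G.load_state_dict(torch.load('G_XtoY.pth', map_location='cpu'))
G.eval()

tf = transforms.Compose([
    transforms.Resize((32, 32)),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
img = Image.new('RGB', (32, 32), 'white')  # placeholder input
with torch.no_grad():
    out = G(tf(img).unsqueeze(0)).squeeze()
print(out.shape)  # torch.Size([3, 32, 32])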
archs.py
ADDED
@@ -0,0 +1,33 @@
+from layers import *
+import torch
+from torch.nn import functional as F
+
+class CycleGenerator(nn.Module):
+    def __init__(self, conv_dim=64):
+        super(CycleGenerator, self).__init__()
+
+        self.conv1 = conv(3, conv_dim, 4)
+        self.conv2 = conv(conv_dim, conv_dim * 2, 4)
+        # experiment with number of residual blocks
+        self.res_block1 = ResidualBlock(conv_dim * 2)
+        self.res_block2 = ResidualBlock(conv_dim * 2)
+        self.deconv1 = deconv(conv_dim * 2, conv_dim, 4)
+        self.deconv2 = deconv(conv_dim, 3, 4, norm=False)
+
+    def forward(self, x):
+        """Generates an image conditioned on an input image.
+        Input
+        -----
+        x: BS x 3 x 32 x 32
+        Output
+        ------
+        out: BS x 3 x 32 x 32
+        """
+        out = F.relu(self.conv1(x))
+        out = F.relu(self.conv2(out))
+        out = F.relu(self.res_block1(out))
+        out = F.relu(self.res_block2(out))
+        out = F.relu(self.deconv1(out))
+        out = torch.tanh(self.deconv2(out))  # bound outputs to [-1, 1]
+
+        return out
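The generator is symmetric: the two stride-2 convs downsample 32x32 to 8x8, the residual blocks operate at 8x8, and the two deconvs upsample back to 32x32, so input and output shapes match and tanh bounds values to [-1, 1]. A quick sanity check under those assumptions:

import torch
from archs import CycleGenerator

G = CycleGenerator(conv_dim=64).eval()
x = torch.randn(2, 3, 32, 32)   # BS x 3 x 32 x 32, as the docstring states
with torch.no_grad():
    y = G(x)
assert y.shape == x.shape       # 32 -> 16 -> 8 -> 16 -> 32
assert y.abs().max() <= 1.0     # tanh output range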
layers.py
ADDED
@@ -0,0 +1,49 @@
+from torch import nn
+## LAYER UTILITIES ####################################################################
+def deconv(in_channels, out_channels, kernel_size, stride=2, padding=1, norm=True, norm_mode='batch'):
+    """Creates a transposed-convolutional layer, with optional batch/instance normalization.
+    """
+    layers = []
+    layers.append(
+        nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, padding, bias=False)
+    )
+    if norm:
+        if norm_mode == 'instance':
+            layers.append(
+                nn.InstanceNorm2d(out_channels)
+            )
+        elif norm_mode == 'batch':
+            layers.append(
+                nn.BatchNorm2d(out_channels)
+            )
+    return nn.Sequential(*layers)
+
+def conv(in_channels, out_channels, kernel_size, stride=2, padding=1, norm=True, norm_mode='batch'):
+    """Creates a convolutional layer, with optional batch/instance normalization.
+    """
+    layers = []
+    layers.append(
+        nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, bias=False)
+    )
+    if norm:
+        if norm_mode == 'instance':
+            layers.append(
+                nn.InstanceNorm2d(out_channels)
+            )
+        elif norm_mode == 'batch':
+            layers.append(
+                nn.BatchNorm2d(out_channels)
+            )
+    return nn.Sequential(*layers)
+
+class ResidualBlock(nn.Module):
+    """Instantiates a residual block with kernel_size = 3.
+    """
+    def __init__(self, conv_dim):
+        super(ResidualBlock, self).__init__()
+        self._conv = conv(
+            conv_dim, conv_dim, kernel_size=3, stride=1, padding=1
+        )
+
+    def forward(self, x):
+        return x + self._conv(x)
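For the shape arithmetic behind these utilities: with kernel 4, stride 2, padding 1, conv maps H to floor((H + 2 - 4) / 2) + 1 = H / 2, deconv maps H to (H - 1) * 2 - 2 + 4 = 2H, and ResidualBlock is shape-preserving by construction (its inner conv uses kernel 3, stride 1, padding 1). A small sketch confirming this:

import torch
from layers import conv, deconv, ResidualBlock

x = torch.randn(1, 3, 32, 32)
down = conv(3, 64, 4)        # 32x32 -> 16x16
up = deconv(64, 3, 4)        # 16x16 -> 32x32
res = ResidualBlock(64)      # 16x16 -> 16x16

h = down(x)
print(h.shape)               # torch.Size([1, 64, 16, 16])
print(res(h).shape)          # torch.Size([1, 64, 16, 16])
print(up(h).shape)           # torch.Size([1, 3, 32, 32])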