Spaces:
Runtime error
forgot deletion
- cyclegan_inference.py +0 -78
- generator.pth +0 -3
cyclegan_inference.py
DELETED
@@ -1,78 +0,0 @@
-import gradio as gr
-import torch
-import torchvision
-from torch import nn
-from typing import List
-
-def ifnone(a, b): # a fastai-specific (fastcore) function used below, redefined so it's independent
-    "`b` if `a` is None else `a`"
-    return b if a is None else a
-
-def convT_norm_relu(ch_in:int, ch_out:int, norm_layer:nn.Module, ks:int=3, stride:int=2, bias:bool=True):
-    return [nn.ConvTranspose2d(ch_in, ch_out, kernel_size=ks, stride=stride, padding=1, output_padding=1, bias=bias),
-            norm_layer(ch_out), nn.ReLU(True)]
-
-def pad_conv_norm_relu(ch_in:int, ch_out:int, pad_mode:str, norm_layer:nn.Module, ks:int=3, bias:bool=True,
-                       pad=1, stride:int=1, activ:bool=True, init=nn.init.kaiming_normal_, init_gain:int=0.02)->List[nn.Module]:
-    layers = []
-    if pad_mode == 'reflection': layers.append(nn.ReflectionPad2d(pad))
-    elif pad_mode == 'border': layers.append(nn.ReplicationPad2d(pad))
-    p = pad if pad_mode == 'zeros' else 0
-    conv = nn.Conv2d(ch_in, ch_out, kernel_size=ks, padding=p, stride=stride, bias=bias)
-    if init:
-        if init == nn.init.normal_:
-            init(conv.weight, 0.0, init_gain)
-        else:
-            init(conv.weight)
-        if hasattr(conv, 'bias') and hasattr(conv.bias, 'data'): conv.bias.data.fill_(0.)
-    layers += [conv, norm_layer(ch_out)]
-    if activ: layers.append(nn.ReLU(inplace=True))
-    return layers
-
-class ResnetBlock(nn.Module):
-    "nn.Module for the ResNet Block"
-    def __init__(self, dim:int, pad_mode:str='reflection', norm_layer:nn.Module=None, dropout:float=0., bias:bool=True):
-        super().__init__()
-        assert pad_mode in ['zeros', 'reflection', 'border'], f'padding {pad_mode} not implemented.'
-        norm_layer = ifnone(norm_layer, nn.InstanceNorm2d)
-        layers = pad_conv_norm_relu(dim, dim, pad_mode, norm_layer, bias=bias)
-        if dropout != 0: layers.append(nn.Dropout(dropout))
-        layers += pad_conv_norm_relu(dim, dim, pad_mode, norm_layer, bias=bias, activ=False)
-        self.conv_block = nn.Sequential(*layers)
-
-    def forward(self, x): return x + self.conv_block(x)
-
-
-def resnet_generator(ch_in:int, ch_out:int, n_ftrs:int=64, norm_layer:nn.Module=None,
-                     dropout:float=0., n_blocks:int=9, pad_mode:str='reflection')->nn.Module:
-    norm_layer = ifnone(norm_layer, nn.InstanceNorm2d)
-    bias = (norm_layer == nn.InstanceNorm2d)
-    layers = pad_conv_norm_relu(ch_in, n_ftrs, 'reflection', norm_layer, pad=3, ks=7, bias=bias)
-    for i in range(2):
-        layers += pad_conv_norm_relu(n_ftrs, n_ftrs *2, 'zeros', norm_layer, stride=2, bias=bias)
-        n_ftrs *= 2
-    layers += [ResnetBlock(n_ftrs, pad_mode, norm_layer, dropout, bias) for _ in range(n_blocks)]
-    for i in range(2):
-        layers += convT_norm_relu(n_ftrs, n_ftrs//2, norm_layer, bias=bias)
-        n_ftrs //= 2
-    layers += [nn.ReflectionPad2d(3), nn.Conv2d(n_ftrs, ch_out, kernel_size=7, padding=0), nn.Tanh()]
-    return nn.Sequential(*layers)
-
-model = resnet_generator(ch_in=3, ch_out=3, n_ftrs=64, norm_layer=None, dropout=0, n_blocks=9)
-model.load_state_dict(torch.load('generator.pth',map_location=torch.device('cpu')))
-model.eval()
-
-
-totensor = torchvision.transforms.ToTensor()
-normalize_fn = torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
-topilimage = torchvision.transforms.ToPILImage()
-
-def predict(input):
-    im = normalize_fn(totensor(input))
-    print(im.shape)
-    preds = model(im.unsqueeze(0))/2 + 0.5
-    print(preds.shape)
-    return topilimage(preds.squeeze(0).detach())
-
-gr_interface = gr.Interface(fn=predict, inputs=gr.inputs.Image(shape=(256, 256)), outputs="image", title='Horse-to-Zebra CycleGAN')
-gr_interface.launch(inline=False,share=False)
generator.pth
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:fc7368c525d797a518058d29250176047d9420f498ad1ea50ad1472330eac695
-size 45532191
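
Note: for anyone who still wants the horse-to-zebra demo after this deletion, a minimal offline-inference sketch follows. It assumes the resnet_generator definition (and its helpers) from the deleted cyclegan_inference.py above is copied into scope, that a local copy of the deleted generator.pth checkpoint is available, and that horse.jpg / zebra.jpg are hypothetical input and output filenames.

import torch
import torchvision
from PIL import Image

# Assumes resnet_generator and its helpers from the deleted cyclegan_inference.py
# have been copied into this file (or are importable from a local copy).
model = resnet_generator(ch_in=3, ch_out=3, n_ftrs=64, n_blocks=9)
model.load_state_dict(torch.load('generator.pth', map_location='cpu'))  # local copy of the deleted checkpoint
model.eval()

to_tensor = torchvision.transforms.ToTensor()
normalize = torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
to_pil = torchvision.transforms.ToPILImage()

img = Image.open('horse.jpg').convert('RGB').resize((256, 256))  # hypothetical input file
with torch.no_grad():
    out = model(normalize(to_tensor(img)).unsqueeze(0)) / 2 + 0.5  # map the Tanh output from [-1, 1] back to [0, 1]
to_pil(out.squeeze(0).clamp(0, 1)).save('zebra.jpg')  # hypothetical output file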