import glob

import torch
import gradio as gr
from torchvision import transforms as T
from torchvision.transforms import Compose, Normalize, ToTensor
from torchvision.utils import make_grid
from huggan.pytorch.cyclegan.modeling_cyclegan import GeneratorResNet



def pred_pipeline(img, transforms):
    # The Gradio Image input provides a numpy HWC array.
    orig_shape = img.shape
    batch = transforms(img).unsqueeze(0)  # preprocess and add a batch dimension

    with torch.no_grad():  # pure inference, no gradients needed
        output_real = sim2real(batch)       # G_AB: synthetic -> real
        output_syn = real2sim(output_real)  # G_BA: real -> synthetic (cycle)

    # make_grid with normalize=True rescales the generator output to [0, 1]
    out_img_real = make_grid(output_real, nrow=1, normalize=True)
    out_img_syn = make_grid(output_syn, nrow=1, normalize=True)

    # resize back to the input resolution and convert to PIL for display
    out_transform = Compose([
        T.Resize(orig_shape[:2]),
        T.ToPILImage()
    ])
    return out_transform(out_img_real), out_transform(out_img_syn)
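
# Example usage outside Gradio (a minimal sketch; it assumes the generators
# and `transform` defined below have already been created, and reuses the
# bundled sample images). The Gradio Image input hands pred_pipeline a numpy
# array, so we load one the same way:
#
#   import numpy as np
#   from PIL import Image
#   img = np.array(Image.open(glob.glob('./samples/*.png')[0]).convert('RGB'))
#   real_img, cycled_syn = pred_pipeline(img, transform)
#   real_img.save('real.png')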




n_channels = 3
image_size = 512  # both checkpoints below are 512x512 models (the -512 suffix)
input_shape = (image_size, image_size)

transform = Compose([
    T.ToPILImage(),
    T.Resize(input_shape),
    ToTensor(),
    Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),  # map pixels from [0, 1] to [-1, 1]
])


# Pre-trained CycleGAN generators from the huggan sim2real project:
# sim2real is G_AB (synthetic -> real), real2sim is G_BA (real -> synthetic).
sim2real = GeneratorResNet.from_pretrained(
    'Chris1/sim2real-512',
    input_shape=(n_channels, image_size, image_size),
    num_residual_blocks=9,
)
real2sim = GeneratorResNet.from_pretrained(
    'Chris1/real2sim-512',
    input_shape=(n_channels, image_size, image_size),
    num_residual_blocks=9,
)
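
# Optional (a sketch, not required for the demo to run): for pure inference it
# is common to switch the generators to eval mode and, if a GPU is available,
# move them there. The input batch in pred_pipeline would then need a matching
# .to(device) before the forward pass.
#
#   device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#   sim2real = sim2real.to(device).eval()
#   real2sim = real2sim.to(device).eval()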

gr.Interface(
    lambda image: pred_pipeline(image, transform),
    inputs=gr.inputs.Image(label='input synthetic image'),
    outputs=[
        gr.outputs.Image(type="pil", label='style transfer to the real world (generator G_AB, synthetic to real, applied to the chosen input)'),
        gr.outputs.Image(type="pil", label='real to synthetic translation (generator G_BA, real to synthetic, applied to the prediction of G_AB)'),
    ],
    title="GTA5 (simulated) to Cityscapes (real) translation",
    examples=[[example] for example in glob.glob('./samples/*.png')],
).launch()
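
# Inside a Hugging Face Space the bare launch() above is all that is needed;
# for a purely local run, launch(share=True) would additionally create a
# temporary public share link (a standard Gradio option, not specific to this
# demo).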


