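"""Gradio demo for anime-style background generation with CartoonGAN.

Loads four pretrained Transformer generators (Makoto Shinkai, Mamoru Hosoda,
Hayao Miyazaki, Satoshi Kon) and serves them through a Gradio Interface.
"""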
import os
import sys
import torch
import gradio as gr
import numpy as np
import torchvision.transforms as transforms


from torch.autograd import Variable
from network.Transformer import Transformer

from PIL import Image

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

MAX_DIMENSION = 1280
MODEL_PATH = "models"
COLOUR_MODEL = "RGB"

STYLE_SHINKAI = "Makoto Shinkai"
STYLE_HOSODA = "Mamoru Hosoda"
STYLE_MIYAZAKI = "Hayao Miyazaki"
STYLE_KON = "Satoshi Kon"
DEFAULT_STYLE = STYLE_SHINKAI
STYLE_CHOICE_LIST = [STYLE_SHINKAI, STYLE_HOSODA, STYLE_MIYAZAKI, STYLE_KON]

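# One CartoonGAN generator (the repo's Transformer network) per artist style.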
shinkai_model = Transformer()
hosoda_model = Transformer()
miyazaki_model = Transformer()
kon_model = Transformer()


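# Load the pretrained weights for each style from the local models/ directory.
# map_location="cpu" keeps loading working on machines without a GPU.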
shinkai_model.load_state_dict(
    torch.load(os.path.join(MODEL_PATH, "shinkai_makoto.pth"), map_location="cpu")
)
hosoda_model.load_state_dict(
    torch.load(os.path.join(MODEL_PATH, "hosoda_mamoru.pth"), map_location="cpu")
)
miyazaki_model.load_state_dict(
    torch.load(os.path.join(MODEL_PATH, "miyazaki_hayao.pth"), map_location="cpu")
)
kon_model.load_state_dict(
    torch.load(os.path.join(MODEL_PATH, "kon_satoshi.pth"), map_location="cpu")
)

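# Inference only: put every generator into evaluation mode.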
shinkai_model.eval()
hosoda_model.eval()
miyazaki_model.eval()
kon_model.eval()

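# Use the GPU when one is available; the input tensor and the selected model
# are moved to it inside inference().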
enable_gpu = torch.cuda.is_available()


def get_model(style):
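    """Return the generator for the requested style, defaulting to Makoto Shinkai."""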
    if style == STYLE_SHINKAI:
        return shinkai_model
    elif style == STYLE_HOSODA:
        return hosoda_model
    elif style == STYLE_MIYAZAKI:
        return miyazaki_model
    elif style == STYLE_KON:
        return kon_model
    else:
        logger.warning(
            f"Style {style} not found. Defaulting to Makoto Shinkai"
        )
        return shinkai_model


def adjust_image_for_model(img):
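    """Downscale img in place so that neither side exceeds MAX_DIMENSION pixels."""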
    logger.info(f"Image Height: {img.height}, Image Width: {img.width}")
    if img.height > MAX_DIMENSION or img.width > MAX_DIMENSION:
        logger.info(f"Dimensions too large. Resizing to {MAX_DIMENSION}px.")
        img.thumbnail((MAX_DIMENSION, MAX_DIMENSION), Image.LANCZOS)

    return img


def inference(img, style):
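    """Stylise a PIL image with the generator matching the requested style."""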
    img = adjust_image_for_model(img)

    # load image
    input_image = img.convert(COLOUR_MODEL)
    input_image = np.asarray(input_image)
    # RGB -> BGR
    input_image = input_image[:, :, [2, 1, 0]]
    input_image = transforms.ToTensor()(input_image).unsqueeze(0)
    # preprocess, (-1, 1)
    input_image = -1 + 2 * input_image

    if enable_gpu:
        logger.info("CUDA found. Using GPU.")
        input_image = Variable(input_image).cuda()
    else:
        logger.info("CUDA not found. Using CPU.")
        input_image = Variable(input_image).float()

    # forward pass through the generator for the selected style
    model = get_model(style)
    if enable_gpu:
        # keep the model on the same device as the input tensor
        model = model.cuda()
    output_image = model(input_image)
    output_image = output_image[0]
    # BGR -> RGB
    output_image = output_image[[2, 1, 0], :, :]
    output_image = output_image.data.cpu().float() * 0.5 + 0.5

    return transforms.ToPILImage()(output_image)


title = "Anime Background GAN"
description = "Gradio demo for CartoonGAN by Chen et al. Available styles: Makoto Shinkai, Mamoru Hosoda, Satoshi Kon, and Hayao Miyazaki."
article = "<p style='text-align: center'><a href='http://openaccess.thecvf.com/content_cvpr_2018/CameraReady/2205.pdf' target='_blank'>CartoonGAN whitepaper from Chen et al.</a></p><p style='text-align: center'><a href='https://github.com/venture-anime/cartoongan-pytorch' target='_blank'>GitHub repo</a></p><p style='text-align: center'><a href='https://github.com/Yijunmaverick/CartoonGAN-Test-Pytorch-Torch' target='_blank'>Original implementation from Yijunmaverick</a></p><center><img src='https://visitor-badge.glitch.me/badge?page_id=akiyamasho' alt='visitor badge'></center>"

examples = [
    ["examples/garden_in.jpg", STYLE_SHINKAI],
    ["examples/library_in.jpg", STYLE_KON],
]


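# NOTE: this uses the Gradio 2.x component API (gr.inputs / gr.outputs,
# allow_screenshot, enable_queue), so it expects an older gradio release.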
gr.Interface(
    fn=inference,
    inputs=[
        gr.inputs.Image(
            type="pil",
            label="Input Photo (less than 1280px on both width and height)",
        ),
        gr.inputs.Dropdown(
            STYLE_CHOICE_LIST,
            type="value",
            default=DEFAULT_STYLE,
            label="Style",
        ),
    ],
    outputs=gr.outputs.Image(
        type="pil",
        label="Output Image",
    ),
    title=title,
    description=description,
    article=article,
    examples=examples,
    allow_flagging="never",
    allow_screenshot=False,
).launch(enable_queue=True)