import gradio as gr
import torch
from torch import nn
import imageio
import cv2

class Generator(nn.Module):
    # Refer to the link below for explanations about nc, nz, and ngf
    # https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html#inputs
    def __init__(self, nc=4, nz=100, ngf=64):
        super(Generator, self).__init__()
        self.network = nn.Sequential(
            nn.ConvTranspose2d(nz, ngf * 4, 3, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 4),
            nn.ReLU(True),
            nn.ConvTranspose2d(ngf * 4, ngf * 2, 3, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True),
            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 0, bias=False),
            nn.BatchNorm2d(ngf),
            nn.ReLU(True),
            nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
            nn.Tanh(),
        )

    def forward(self, input):
        output = self.network(input)
        return output
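
# Minimal sketch (not exercised by the demo below; the noise shape is the standard
# DCGAN convention and is an assumption here): the generator maps a (batch, nz, 1, 1)
# latent tensor through the four transposed-convolution blocks above to a
# (batch, nc, 24, 24) image with values in [-1, 1] from the final Tanh, e.g.:
#
#   netG = Generator()                      # nc=4, nz=100, ngf=64
#   noise = torch.randn(1, 100, 1, 1)
#   with torch.no_grad():
#       fake = netG(noise)                  # -> torch.Size([1, 4, 24, 24])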

def display_gif(file_name, save_name):
    # Collect 8 consecutive frames and write them out as an animated GIF.
    images = []
    for frame in range(8):
        frame_name = '%d' % (frame)
        image_filename = file_name + frame_name + '.png'
        images.append(imageio.imread(image_filename))
    return imageio.mimsave(save_name, images)

def display_gif_pad(file_name, save_name):
    # Same as display_gif, but drop the alpha channel and pad each frame with
    # 32 black pixels on the left and right before writing the GIF.
    images = []
    for frame in range(8):
        frame_name = '%d' % (frame)
        image_filename = file_name + frame_name + '.png'
        image = imageio.imread(image_filename)
        image = image[:, :, :3]
        image_pad = cv2.copyMakeBorder(image, 0, 0, 32, 32, cv2.BORDER_CONSTANT, value=0)
        images.append(image_pad)
    return imageio.mimsave(save_name, images)

def display_image(file_name):
    # Save the first frame of the sequence as a still image.
    image_filename = file_name + '0' + '.png'
    print(image_filename)
    image = imageio.imread(image_filename)
    imageio.imwrite('image.png', image)

def run(action, body, hair, top, bottom):
    # Map the radio-button labels to the attribute codes used in the frame
    # filenames, which follow the pattern front_<body><bottom><top><hair>_<frame>.png.
    # body
    if body == "human": body = '0'
    elif body == "alien": body = '1'
    # hair
    if hair == "green": hair = '0'
    elif hair == "yellow": hair = '2'
    elif hair == "rose": hair = '4'
    elif hair == "red": hair = '7'
    elif hair == "wine": hair = '8'
    # top
    if top == "brown": top = '0'
    elif top == "blue": top = '1'
    elif top == "white": top = '2'
    # bottom
    if bottom == "white": bottom = '0'
    elif bottom == "golden": bottom = '1'
    elif bottom == "red": bottom = '2'
    elif bottom == "silver": bottom = '3'
    file_name_source = './Sprite/frames/domain_1/' + action + '/'
    file_name_source = file_name_source + 'front' + '_' + str(body) + str(bottom) + str(top) + str(hair) + '_'
    display_gif_pad(file_name_source, 'avatar_source.gif')
    return 'avatar_source.gif'
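
# Example (hypothetical inputs, for illustration only):
#   run("walk", "human", "green", "brown", "white")
#   reads ./Sprite/frames/domain_1/walk/front_0000_0.png ... front_0000_7.png,
#   writes the padded frames to avatar_source.gif, and returns that path.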

gr.Interface(
    run,
    inputs=[
        gr.Radio(choices=["shoot", "slash", "spellcard", "thrust", "walk"], value="shoot"),
        gr.Radio(choices=["human", "alien"], value="human"),
        gr.Radio(choices=["green", "yellow", "rose", "red", "wine"], value="green"),
        gr.Radio(choices=["brown", "blue", "white"], value="brown"),
        gr.Radio(choices=["white", "golden", "red", "silver"], value="white"),
    ],
    outputs=[
        gr.components.Image(type="file", label="Avatar (Source)"),
    ],
    live=True,
    title="TransferVAE for Unsupervised Video Domain Adaptation",
).launch()