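# AnimeGANv2 "face paint" Gradio demo: clone the upstream animegan2-pytorch
# repo, download the pretrained generator weights, and serve single-image
# stylization through a simple web UI.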
import os
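# Fetch the upstream model code and pretrained checkpoints at startup; the
# gdown calls download weight files from Google Drive (presumably including
# the face_paint_512_v2_0.pt checkpoint loaded below).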
os.system("git clone https://github.com/bryandlee/animegan2-pytorch")
os.system("gdown https://drive.google.com/uc?id=1WK5Mdt6mwlcsqCZMHkCUSDJxN1UyFi0-")
os.system("gdown https://drive.google.com/uc?id=18H3iK09_d54qEDoWIc82SyWB2xun4gjU")
#os.system("pip install dlib")
import sys
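# Make the cloned repo importable so that `from model import Generator` below resolves.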
sys.path.append("animegan2-pytorch")
import torch

# Inference only: disable autograd globally.
torch.set_grad_enabled(False)

from model import Generator

device = "cpu"
model = Generator().eval().to(device)
# map_location keeps the load working on CPU-only hosts even if the
# checkpoint was saved on GPU.
model.load_state_dict(torch.load("face_paint_512_v2_0.pt", map_location=device))
from PIL import Image
from torchvision.transforms.functional import to_tensor, to_pil_image
import gradio as gr
def face2paint(
    img: Image.Image,
    size: int,
    side_by_side: bool = False,
) -> Image.Image:
    # Center-crop to a square, then resize to the model's working resolution.
    w, h = img.size
    s = min(w, h)
    img = img.crop(((w - s) // 2, (h - s) // 2, (w + s) // 2, (h + s) // 2))
    img = img.resize((size, size), Image.LANCZOS)

    # Scale pixels from [0, 1] to [-1, 1] and add a batch dimension.
    input = to_tensor(img).unsqueeze(0) * 2 - 1
    output = model(input.to(device)).cpu()[0]

    if side_by_side:
        # Concatenate input and output along the width axis for comparison.
        output = torch.cat([input[0], output], dim=2)

    # Map back from [-1, 1] to [0, 1] before converting to a PIL image.
    output = (output * 0.5 + 0.5).clip(0, 1)
    return to_pil_image(output)
def inference(image):
    # Gradio passes the upload in as a PIL image; stylize it at 512x512.
    return face2paint(image, 512)

# Single image in, single image out.
iface = gr.Interface(fn=inference, inputs=gr.Image(type="pil"), outputs=gr.Image(type="pil"))
iface.launch()
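# Local usage sketch (no Gradio): assumes the repo clone and weight download
# above have already run, and that "portrait.jpg" is a placeholder path to
# any RGB photo on disk.
#
#   from PIL import Image
#   stylized = face2paint(Image.open("portrait.jpg").convert("RGB"), 512)
#   stylized.save("portrait_paint.png")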