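# Dragness: Gradio demo for a Pixel2Style2Pixel model fine-tuned for drag styling,
# running a CPU TorchScript export of the network.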
import os

# Install dlib at runtime (a Hugging Face Spaces workaround; ordinarily this
# dependency would be pinned in requirements.txt).
os.system("pip install --upgrade pip")
os.system("pip install dlib")
import face_detection
from PIL import Image, ImageOps
import numpy as np

import torch
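# Inference only: disable autograd globally and load the TorchScript model (CPU export).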
torch.set_grad_enabled(False)
net = torch.jit.load('dragness_p2s2p_torchscript_cpu.pt')
net.eval()


def tensor2im(var):
    # Convert a CHW tensor in [-1, 1] to an HWC uint8 PIL image.
    var = var.cpu().detach().transpose(0, 2).transpose(0, 1).numpy()
    var = (var + 1) / 2
    var = np.clip(var, 0, 1) * 255
    return Image.fromarray(var.astype('uint8'))

def image_as_array(image_in):
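    # Normalize a PIL image to a (1, C, H, W) float32 array in [-1, 1].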
    im_array = np.array(image_in, np.float32)
    im_array = (im_array/255)*2 - 1
    im_array = np.transpose(im_array, (2, 0, 1))
    im_array = np.expand_dims(im_array, 0)
    return im_array

def find_aligned_face(image_in, size=256):
    # Detect the first face in the image and align it to a size x size crop.
    aligned_image, n_faces, quad = face_detection.align(image_in, face_index=0, output_size=size)
    return aligned_image, n_faces, quad

def align_first_face(image_in, size=256):
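    # Align the first detected face; if none is found, fall back to resizing
    # the whole (EXIF-corrected) image.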
    aligned_image, n_faces, quad = find_aligned_face(image_in, size=size)
    if n_faces == 0:
        try:
            image_in = ImageOps.exif_transpose(image_in)
        except Exception:
            print("exif problem, not rotating")
        image_in = image_in.resize((size, size))
        im_array = image_as_array(image_in)
    else:
        im_array = image_as_array(aligned_image)

    return im_array

def img_concat_h(im1, im2):
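    # Paste two images side by side onto a new RGB canvas.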
    dst = Image.new('RGB', (im1.width + im2.width, im1.height))
    dst.paste(im1, (0, 0))
    dst.paste(im2, (im1.width, 0))
    return dst

import gradio as gr

def face2drag(
    img: Image.Image,
    size: int
) -> Image.Image:

    # Align the input face, run it through the network, and return the aligned
    # input and the generated output concatenated side by side.
    aligned_img = align_first_face(img, size=size)
    if aligned_img is None:
        output = None
    else:
        input_tensor = torch.Tensor(aligned_img)
        output = net(input_tensor)
        output = tensor2im(output[0])
        output = img_concat_h(tensor2im(input_tensor[0]), output)

    return output
    

def inference(img):
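    # Gradio callback: run the model at the 256 px input size the aligned faces use.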
    out = face2drag(img, 256)
    return out


title = "Dragness"
description = "Gradio demo for Pixel2Style2Pixel fine-tuned for drag styling. To use it, simply upload your image, or click one of the examples to load it. Read more at the links below."
article = "<p style='text-align: center'><a href='https://github.com/justinpinkney/pixel2style2pixel/tree/nw' target='_blank'>Github Repo</a></p><p style='text-align: center'>samples: <img src='https://hf.space/gradioiframe/Norod78/Dragness/file/Sample00001.jpg' alt='Sample00001'/><img src='https://hf.space/gradioiframe/Norod78/Dragness/file/Sample00002.jpg' alt='Sample00002'/><img src='https://hf.space/gradioiframe/Norod78/Dragness/file/Sample00003.jpg' alt='Sample00003'/><img src='https://hf.space/gradioiframe/Norod78/Dragness/file/Sample00004.jpg' alt='Sample00004'/><img src='https://hf.space/gradioiframe/Norod78/Dragness/file/Sample00005.jpg' alt='Sample00005'/><img src='https://hf.space/gradioiframe/Norod78/Dragness/file/Sample00006.jpg' alt='Sample00006'/></p><p>The Drag model was fine-tuned by Doron Adler</p>"

examples = [['Example00001.jpg'], ['Example00002.jpg'], ['Example00003.jpg'], ['Example00004.jpg'], ['Example00005.jpg'], ['Example00006.jpg'], ['Example00007.jpg']]

# Note: gr.inputs / gr.outputs is the legacy Gradio 2.x API this Space was
# written against; newer Gradio versions use gr.Image directly.
gr.Interface(
    inference,
    gr.inputs.Image(type="pil", shape=(1024, 1024)),
    gr.outputs.Image(type="pil"),
    title=title,
    description=description,
    article=article,
    examples=examples,
    enable_queue=True,
    allow_flagging=False,
).launch()