M Yusril committed on
Commit d2fcc0b • 1 Parent(s): 38f6354

Add application file

Files changed (2)
  1. app.py +29 -0
  2. requirements.txt +9 -0
app.py ADDED
@@ -0,0 +1,29 @@
+ from PIL import Image
+ import torch
+ import gradio as gr
+
+ model2 = torch.hub.load(
+     "AK391/animegan2-pytorch:main",
+     "generator",
+     pretrained=True,
+     device="cuda",
+     progress=False
+ )
+ model1 = torch.hub.load("AK391/animegan2-pytorch:main", "generator", pretrained="face_paint_512_v1", device="cuda")
+ face2paint = torch.hub.load(
+     'AK391/animegan2-pytorch:main', 'face2paint',
+     size=512, device="cuda", side_by_side=False
+ )
+ def inference(img, ver):
+     if ver == 'version 2 (🔺 robustness,🔻 stylization)':
+         out = face2paint(model2, img)
+     else:
+         out = face2paint(model1, img)
+     return out
+
+ title = "AnimeGANv2"
+ description = "Gradio demo for AnimeGANv2 face portraits. To use it, simply upload your image or click one of the examples to load them. Read more at the links below. For best results, use a cropped portrait picture similar to the examples below."
+ article = "<p style='text-align: center'><a href='https://github.com/bryandlee/animegan2-pytorch' target='_blank'>Github Repo Pytorch</a></p><p style='text-align: center'>samples from repo: <img src='https://user-images.githubusercontent.com/26464535/129888683-98bb6283-7bb8-4d1a-a04a-e795f5858dcf.gif' alt='animation'/> <img src='https://user-images.githubusercontent.com/26464535/137619176-59620b59-4e20-4d98-9559-a424f86b7f24.jpg' alt='animation'/><img src='https://user-images.githubusercontent.com/26464535/127134790-93595da2-4f8b-4aca-a9d7-98699c5e6914.jpg' alt='animation'/></p>"
+ examples = [['groot.jpeg', 'version 2 (🔺 robustness,🔻 stylization)'], ['bill.png', 'version 1 (🔺 stylization, 🔻 robustness)'], ['tony.png', 'version 1 (🔺 stylization, 🔻 robustness)'], ['elon.png', 'version 2 (🔺 robustness,🔻 stylization)'], ['IU.png', 'version 1 (🔺 stylization, 🔻 robustness)'], ['billie.png', 'version 2 (🔺 robustness,🔻 stylization)'], ['will.png', 'version 2 (🔺 robustness,🔻 stylization)'], ['beyonce.png', 'version 1 (🔺 stylization, 🔻 robustness)'], ['gongyoo.jpeg', 'version 1 (🔺 stylization, 🔻 robustness)']]
+ gr.Interface(inference, [gr.inputs.Image(type="pil"), gr.inputs.Radio(['version 1 (🔺 stylization, 🔻 robustness)', 'version 2 (🔺 robustness,🔻 stylization)'], type="value", default='version 2 (🔺 robustness,🔻 stylization)', label='version')
+ ], gr.outputs.Image(type="pil"), title=title, description=description, article=article, enable_queue=True, examples=examples, allow_flagging=False).launch()
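
Note that app.py hard-codes device="cuda", so the model loads will fail on a machine without a GPU. A minimal sketch of a CPU fallback (an assumption for local use, not part of this commit) could look like:

# Hypothetical variant, not in this commit: pick "cuda" only when a GPU is actually available.
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"

model2 = torch.hub.load(
    "AK391/animegan2-pytorch:main",
    "generator",
    pretrained=True,
    device=device,   # "cuda" on GPU machines, "cpu" otherwise
    progress=False
)

The same device value would be passed to the other torch.hub.load calls; inference is noticeably slower on CPU but the demo still works.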
requirements.txt ADDED
@@ -0,0 +1,9 @@
+ torch
+ torchvision
+ Pillow
+ gdown
+ numpy
+ scipy
+ cmake
+ onnxruntime-gpu
+ opencv-python-headless