Ahsen Khaliq committed on
Commit
7e51154
β€’
1 Parent(s): 2d6a240

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -0
app.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# --- Startup: fetch code and weights, then load auxiliary models. ---
# Runs once at import time; the app expects a Colab-like environment
# (hard-coded /content paths below, CUDA device for torch.hub models).
import os

# Clone the BlendGAN repo that provides style_transfer_folder.py.
os.system("git clone https://github.com/onion-liu/BlendGAN.git")
# Download pretrained checkpoints from Google Drive via gdown.
# NOTE(review): presumably blendgan.pt and psp_encoder.pt — confirm the
# Drive ids still resolve; gdown failures here are silently ignored.
os.system("gdown https://drive.google.com/uc?id=1eF04jKMLAb9DvzI72m8Akn5ykWf3EafE")
os.system("gdown https://drive.google.com/uc?id=14nevG94hNkkwaoK5eJLF1iv78cv5O8fN")

from PIL import Image
import torch
import gradio as gr

# NOTE(review): model2, model1, and face2paint are loaded here but never
# referenced by inference() below — they look like leftovers from the
# AnimeGANv2 demo this app was adapted from. Loading them still costs
# startup time and GPU memory; verify before removing.
model2 = torch.hub.load(
    "AK391/animegan2-pytorch:main",
    "generator",
    pretrained=True,
    device="cuda",
    progress=False
)
model1 = torch.hub.load("AK391/animegan2-pytorch:main", "generator", pretrained="face_paint_512_v1", device="cuda")
face2paint = torch.hub.load(
    'AK391/animegan2-pytorch:main', 'face2paint',
    size=512, device="cuda",side_by_side=False
)
20
def inference(img, ver):
    """Run BlendGAN style transfer on *img* and return the stylized image.

    Parameters
    ----------
    img : PIL.Image.Image
        Input portrait from the Gradio image widget.
    ver : str
        Selected version label from the radio widget. Currently unused by
        the style-transfer command; kept for interface compatibility.

    Returns
    -------
    PIL.Image.Image
        The first image produced in ``results/style_transfer/``.

    Raises
    ------
    FileNotFoundError
        If the style-transfer script produced no output image.
    """
    # Bug fix 1: the original ignored `img` entirely — persist the upload
    # into the input folder that the command below points at.
    input_dir = "/content/BlendGAN/input/"
    os.makedirs(input_dir, exist_ok=True)
    img.save(os.path.join(input_dir, "input.png"))

    os.system("""python style_transfer_folder.py --size 1024 --ckpt ./pretrained_models/blendgan.pt --psp_encoder_ckpt ./pretrained_models/psp_encoder.pt --style_img_path /content/BlendGAN/style/ --input_img_path /content/BlendGAN/input/ --outdir results/style_transfer/""")

    # Bug fix 2: the original returned an undefined name `out`, raising
    # NameError on every call. Load the generated result instead.
    out_dir = "results/style_transfer/"
    produced = sorted(os.listdir(out_dir)) if os.path.isdir(out_dir) else []
    if not produced:
        raise FileNotFoundError(
            "style transfer produced no output in results/style_transfer/"
        )
    # NOTE(review): assumes the script writes exactly the images we want
    # here; if it emits multiple files, the lexicographically first is
    # returned — confirm against style_transfer_folder.py's naming.
    out = Image.open(os.path.join(out_dir, produced[0]))
    return out
23
# --- Gradio UI wiring (legacy gradio 1.x/2.x API: gr.inputs/gr.outputs,
# enable_queue/allow_flagging passed to Interface). ---
# NOTE(review): the title/description advertise AnimeGANv2, but
# inference() actually runs BlendGAN style transfer — confirm which demo
# this app is meant to be.
title = "AnimeGANv2"
description = "Gradio Demo for AnimeGanv2 Face Portrait v2. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below. Please use a cropped portrait picture for best results similar to the examples below."
article = "<p style='text-align: center'><a href='https://github.com/bryandlee/animegan2-pytorch' target='_blank'>Github Repo Pytorch</a> | <a href='https://github.com/Kazuhito00/AnimeGANv2-ONNX-Sample' target='_blank'>Github Repo ONNX</a></p><p style='text-align: center'>samples from repo: <img src='https://user-images.githubusercontent.com/26464535/129888683-98bb6283-7bb8-4d1a-a04a-e795f5858dcf.gif' alt='animation'/> <img src='https://user-images.githubusercontent.com/26464535/137619176-59620b59-4e20-4d98-9559-a424f86b7f24.jpg' alt='animation'/></p>"
# Example gallery: (image file, version label) pairs. The image files must
# exist next to app.py for the examples to load.
examples=[['groot.jpeg','version 2 (πŸ”Ί robustness,πŸ”» stylization)'],['bill.png','version 1 (πŸ”Ί stylization, πŸ”» robustness)'],['tony.png','version 1 (πŸ”Ί stylization, πŸ”» robustness)'],['elon.png','version 2 (πŸ”Ί robustness,πŸ”» stylization)'],['IU.png','version 1 (πŸ”Ί stylization, πŸ”» robustness)'],['billie.png','version 2 (πŸ”Ί robustness,πŸ”» stylization)'],['will.png','version 2 (πŸ”Ί robustness,πŸ”» stylization)'],['beyonce.jpeg','version 1 (πŸ”Ί stylization, πŸ”» robustness)'],['gongyoo.jpeg','version 1 (πŸ”Ί stylization, πŸ”» robustness)']]
# Build and launch the demo: PIL image + version radio in, PIL image out.
gr.Interface(inference, [gr.inputs.Image(type="pil"),gr.inputs.Radio(['version 1 (πŸ”Ί stylization, πŸ”» robustness)','version 2 (πŸ”Ί robustness,πŸ”» stylization)'], type="value", default='version 2 (πŸ”Ί robustness,πŸ”» stylization)', label='version')
], gr.outputs.Image(type="pil"),title=title,description=description,article=article,enable_queue=True,examples=examples,allow_flagging=False).launch()