Ahsen Khaliq committed
Commit
50b7dca
Parent: 46bd6e1

Update app.py

Files changed (1)
  1. app.py +11 -4
app.py CHANGED
@@ -2,7 +2,7 @@ from PIL import Image
 import torch
 import gradio as gr
 
-model = torch.hub.load(
+model2 = torch.hub.load(
     "bryandlee/animegan2-pytorch:main",
     "generator",
     pretrained=True, # or give URL to a pretrained model
@@ -10,13 +10,19 @@ model = torch.hub.load(
     progress=False, # show progress
 )
 
+model1 = torch.hub.load("bryandlee/animegan2-pytorch:main", "generator", pretrained="face_paint_512_v1")
+
+
 face2paint = torch.hub.load(
     'bryandlee/animegan2-pytorch:main', 'face2paint',
     size=512, device="cuda"
 )
 
-def inference(img):
-    out = face2paint(model, img)
+def inference(img, ver):
+    if ver == 'version 2':
+        out = face2paint(model2, img)
+    else:
+        out = face2paint(model1, img)
     return out
 
 
@@ -25,4 +31,5 @@ description = "Gradio demo for AnimeGanv2 Face Portrait v2. To use it, simply up
 article = "<p style='text-align: center'><a href='https://github.com/bryandlee/animegan2-pytorch' target='_blank'>Github Repo Pytorch</a> | <a href='https://github.com/Kazuhito00/AnimeGANv2-ONNX-Sample' target='_blank'>Github Repo ONNX</a></p><p style='text-align: center'>samples from repo: <img src='https://user-images.githubusercontent.com/26464535/129888683-98bb6283-7bb8-4d1a-a04a-e795f5858dcf.gif' alt='animation'/> <img src='https://user-images.githubusercontent.com/26464535/137619176-59620b59-4e20-4d98-9559-a424f86b7f24.jpg' alt='animation'/></p>"
 
 examples=[['groot.jpeg'],['bill.png'],['tony.png'],['elon.png'],['IU.png'],['billie.png'],['will.png'],['beyonce.jpeg'],['gongyoo.jpeg']]
-gr.Interface(inference, gr.inputs.Image(type="pil"), gr.outputs.Image(type="pil"),title=title,description=description,article=article,enable_queue=True,examples=examples).launch()
+gr.Interface(inference, [gr.inputs.Image(type="pil"),gr.inputs.Radio(['version 1','version 2'], type="value", default='version 2', label='version')
+], gr.outputs.Image(type="pil"),title=title,description=description,article=article,enable_queue=True,examples=examples).launch()
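
After this change, inference takes the image plus the radio selection, so the v1 and v2 checkpoints can also be compared outside the Gradio UI. A minimal sketch, assuming the updated app.py has already run, a CUDA device is available, and a local portrait named photo.jpg exists (the filename and output names are hypothetical; face2paint is expected to return a PIL image, as the gr.outputs.Image(type="pil") output suggests):

    from PIL import Image

    # 'version 1' routes to model1 (face_paint_512_v1 weights);
    # any other value falls through to model2, the default v2 generator.
    img = Image.open("photo.jpg").convert("RGB")   # hypothetical input image
    inference(img, "version 1").save("out_v1.png")
    inference(img, "version 2").save("out_v2.png")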