Ahsen Khaliq committed on
Commit
72015f8
1 Parent(s): 96df419

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -20
app.py CHANGED
@@ -9,34 +9,21 @@ model2 = torch.hub.load(
9
  device="cuda",
10
  progress=False
11
  )
12
-
13
  model1 = torch.hub.load("AK391/animegan2-pytorch:main", "generator", pretrained="face_paint_512_v1", device="cuda")
14
-
15
  face2paint = torch.hub.load(
16
  'AK391/animegan2-pytorch:main', 'face2paint',
17
  size=512, device="cuda",side_by_side=False
18
  )
19
-
20
- def inference(imgweb,img,option,ver):
21
- if option == 'webcam':
22
- if ver == 'version 2 (🔺 robustness,🔻 stylization)':
23
- out = face2paint(model2, imgweb)
24
- else:
25
- out = face2paint(model1, imgweb)
26
- else:
27
- if ver == 'version 2 (🔺 robustness,🔻 stylization)':
28
- out = face2paint(model2, img)
29
- else:
30
- out = face2paint(model1, img)
31
  return out
32
 
33
  title = "Animeganv2"
34
-
35
  description = "Gradio Demo for AnimeGanv2 Face Portrait v2. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below. Please use a cropped portrait picture for best results similar to the examples below"
36
-
37
  article = "<p style='text-align: center'><a href='https://github.com/bryandlee/animegan2-pytorch' target='_blank'>Github Repo Pytorch</a> | <a href='https://github.com/Kazuhito00/AnimeGANv2-ONNX-Sample' target='_blank'>Github Repo ONNX</a></p><p style='text-align: center'>samples from repo: <img src='https://user-images.githubusercontent.com/26464535/129888683-98bb6283-7bb8-4d1a-a04a-e795f5858dcf.gif' alt='animation'/> <img src='https://user-images.githubusercontent.com/26464535/137619176-59620b59-4e20-4d98-9559-a424f86b7f24.jpg' alt='animation'/></p>"
38
-
39
  examples=[['groot.jpeg','version 2 (🔺 robustness,🔻 stylization)'],['bill.png','version 1 (🔺 stylization, 🔻 robustness)'],['tony.png','version 1 (🔺 stylization, 🔻 robustness)'],['elon.png','version 2 (🔺 robustness,🔻 stylization)'],['IU.png','version 1 (🔺 stylization, 🔻 robustness)'],['billie.png','version 2 (🔺 robustness,🔻 stylization)'],['will.png','version 2 (🔺 robustness,🔻 stylization)'],['beyonce.jpeg','version 1 (🔺 stylization, 🔻 robustness)'],['gongyoo.jpeg','version 1 (🔺 stylization, 🔻 robustness)']]
40
-
41
- gr.Interface(inference, [gr.inputs.Image(type="pil",label='Input Image',optional=True),gr.inputs.Radio(['version 1 (🔺 stylization, 🔻 robustness)','version 2 (🔺 robustness,🔻 stylization)'], type="value", default='version 2 (🔺 robustness,🔻 stylization)', label='version'),gr.inputs.Radio(['webcam','image upload'], type="value", default='webcam', label='Source'),gr.inputs.Image(type="pil",label='Input Webcam',source='webcam',optional=True)
42
- ], gr.outputs.Image(type="pil"),title=title,description=description,article=article,enable_queue=True,examples=examples,allow_flagging=False).launch()
9
  device="cuda",
10
  progress=False
11
  )
 
12
  model1 = torch.hub.load("AK391/animegan2-pytorch:main", "generator", pretrained="face_paint_512_v1", device="cuda")
 
13
  face2paint = torch.hub.load(
14
  'AK391/animegan2-pytorch:main', 'face2paint',
15
  size=512, device="cuda",side_by_side=False
16
  )
17
+ def inference(img, ver):
18
+ if ver == 'version 2 (🔺 robustness,🔻 stylization)':
19
+ out = face2paint(model2, img)
20
+ else:
21
+ out = face2paint(model1, img)
 
 
 
 
 
 
 
22
  return out
23
 
24
  title = "Animeganv2"
 
25
  description = "Gradio Demo for AnimeGanv2 Face Portrait v2. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below. Please use a cropped portrait picture for best results similar to the examples below"
 
26
  article = "<p style='text-align: center'><a href='https://github.com/bryandlee/animegan2-pytorch' target='_blank'>Github Repo Pytorch</a> | <a href='https://github.com/Kazuhito00/AnimeGANv2-ONNX-Sample' target='_blank'>Github Repo ONNX</a></p><p style='text-align: center'>samples from repo: <img src='https://user-images.githubusercontent.com/26464535/129888683-98bb6283-7bb8-4d1a-a04a-e795f5858dcf.gif' alt='animation'/> <img src='https://user-images.githubusercontent.com/26464535/137619176-59620b59-4e20-4d98-9559-a424f86b7f24.jpg' alt='animation'/></p>"
 
27
  examples=[['groot.jpeg','version 2 (🔺 robustness,🔻 stylization)'],['bill.png','version 1 (🔺 stylization, 🔻 robustness)'],['tony.png','version 1 (🔺 stylization, 🔻 robustness)'],['elon.png','version 2 (🔺 robustness,🔻 stylization)'],['IU.png','version 1 (🔺 stylization, 🔻 robustness)'],['billie.png','version 2 (🔺 robustness,🔻 stylization)'],['will.png','version 2 (🔺 robustness,🔻 stylization)'],['beyonce.jpeg','version 1 (🔺 stylization, 🔻 robustness)'],['gongyoo.jpeg','version 1 (🔺 stylization, 🔻 robustness)']]
28
+ gr.Interface(inference, [gr.inputs.Image(type="pil"),gr.inputs.Radio(['version 1 (🔺 stylization, 🔻 robustness)','version 2 (🔺 robustness,🔻 stylization)'], type="value", default='version 2 (🔺 robustness,🔻 stylization)', label='version')
29
+ ], gr.outputs.Image(type="pil"),title=title,description=description,article=article,enable_queue=True,examples=examples,allow_flagging=False).launch()