Commit d1188bd by Ahsen Khaliq
Parent: c6cec0e

Update app.py

Files changed (1): app.py (+5, -5)
app.py CHANGED
@@ -104,7 +104,7 @@ zs = torch.randn([10000, G.mapping.z_dim], device=device)
 w_stds = G.mapping(zs, None).std(0)
 
 
-def inference(text,steps,image,mode):
+def inference(text,steps,image,mode, seed):
   if mode == "CLIP+StyleGAN3":
     all_frames = []
     target = clip_model.embed_text(text)
@@ -165,12 +165,12 @@ def inference(text,steps,image,mode):
     writer.close()
     return pil_image, "test.mp4"
   else:
-    os.system("python gen_images.py --outdir=out --trunc=1 --seeds=2 \
+    os.system("python gen_images.py --outdir=out --trunc=1 --seeds="+seed+" \
     --network=https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-r-afhqv2-512x512.pkl")
-    os.system("python gen_video.py --output=lerp.mp4 --trunc=1 --seeds=0-10 --grid=1x1 \
+    os.system("python gen_video.py --output=lerp.mp4 --trunc=1 --seeds=0-"+seed+" --grid=1x1 \
     --network=https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-r-afhqv2-512x512.pkl")
     img = Image.new("RGB", (800, 1280), (255, 255, 255))
-    out = Image.open("out/2.png")
+    out = Image.open("out/0002.png")
     return out, "lerp.mp4"
 
 
@@ -180,7 +180,7 @@ article = "<p style='text-align: center'><a href='https://colab.research.google.
 examples = [['mario',150,None]]
 gr.Interface(
     inference,
-    ["text",gr.inputs.Slider(minimum=50, maximum=200, step=1, default=150, label="steps"),gr.inputs.Image(type="pil", label="Image (Optional)", optional=True),gr.inputs.Radio(["CLIP+StyleGAN3","Stylegan3 interpolation"], type="value", default="CLIP+StyleGAN3", label="mode")],
+    ["text",gr.inputs.Slider(minimum=50, maximum=200, step=1, default=150, label="steps"),gr.inputs.Image(type="pil", label="Image (Optional)", optional=True),gr.inputs.Radio(["CLIP+StyleGAN3","Stylegan3 interpolation"], type="value", default="CLIP+StyleGAN3", label="mode"),gr.inputs.Slider(minimum=5, maximum=10, step=1, default=5, label="seed (for stylegan3)")],
     [gr.outputs.Image(type="pil", label="Output"),"playable_video"],
     title=title,
     description=description,
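
A note for anyone adapting this change: gr.inputs.Slider hands inference() a number, so the "--seeds="+seed concatenation above only works once the value is cast to a string. A minimal sketch of the interpolation branch under that assumption (the network URL and the hard-coded out/0002.png path are copied from the diff; the helper name interpolate is hypothetical):

import os
from PIL import Image

# StyleGAN3 AFHQv2 checkpoint referenced throughout the diff.
NETWORK = ("https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/"
           "versions/1/files/stylegan3-r-afhqv2-512x512.pkl")

def interpolate(seed):
    # Slider values arrive as numbers; cast before formatting the commands.
    seed = int(seed)
    # Render a single image for the chosen seed.
    os.system(f"python gen_images.py --outdir=out --trunc=1 --seeds={seed} --network={NETWORK}")
    # Render an interpolation video across seeds 0 through `seed`.
    os.system(f"python gen_video.py --output=lerp.mp4 --trunc=1 --seeds=0-{seed} --grid=1x1 --network={NETWORK}")
    out = Image.open("out/0002.png")  # path hard-coded in the diff
    return out, "lerp.mp4"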