Ahsen Khaliq committed on
Commit 03881aa
1 Parent(s): 75b0726

Update app.py

Files changed (1)
  1. app.py +11 -3
app.py CHANGED
@@ -2,7 +2,7 @@ import os
 os.system("pip install --upgrade torch==1.9.1+cu111 torchvision==0.10.1+cu111 -f https://download.pytorch.org/whl/torch_stable.html")
 os.system("git clone https://github.com/openai/CLIP")
 os.system("pip install -e ./CLIP")
-os.system("pip install einops ninja scipy numpy Pillow tqdm")
+os.system("pip install einops ninja scipy numpy Pillow tqdm imageio-ffmpeg imageio")
 import sys
 sys.path.append('./CLIP')
 import io
@@ -21,6 +21,8 @@ from tqdm.notebook import tqdm
 from torchvision.transforms import Compose, Resize, ToTensor, Normalize
 from einops import rearrange
 import gradio as gr
+import imageio
+
 print(torch.cuda.get_device_name(0))
 device = torch.device('cuda:0')
 def fetch(url_or_path):
@@ -104,6 +106,7 @@ w_stds = G.mapping(zs, None).std(0)
 
 
 def inference(text):
+  all_frames = []
   target = clip_model.embed_text(text)
   steps = 20
   seed = 2
@@ -146,9 +149,14 @@ def inference(text):
     q_ema = q_ema * 0.9 + q * 0.1
     image = G.synthesis(q_ema * w_stds + G.mapping.w_avg, noise_mode='const')
     pil_image = TF.to_pil_image(image[0].add(1).div(2).clamp(0,1))
+    all_frames.append(pil_image)
     #os.makedirs(f'samples/{timestring}', exist_ok=True)
     #pil_image.save(f'samples/{timestring}/{i:04}.jpg')
-  return pil_image
+  writer = imageio.get_writer('test.mp4', fps=20)
+  for im in all_frames:
+    writer.append_data(np.array(im))
+  writer.close()
+  return pil_image, "test.mp4"
 
 
 title = "StyleGAN3+CLIP"
@@ -158,7 +166,7 @@ examples = [['elon musk']]
 gr.Interface(
     inference,
     "text",
-    gr.outputs.Image(type="pil", label="Output"),
+    [gr.outputs.Image(type="pil", label="Output"),"playable_video"],
     title=title,
     description=description,
     article=article,
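
For reference, the frame-collection and encoding pattern this commit adds to inference() can be exercised on its own. The sketch below is illustrative rather than part of the commit: a dummy frame source stands in for the CLIP-guided optimization loop, and it assumes imageio plus the imageio-ffmpeg backend are installed, which is what the updated pip install line provides.

    # Illustrative sketch of the frame-to-video pattern added above (not from app.py).
    import numpy as np
    import imageio
    from PIL import Image

    all_frames = []
    for i in range(20):  # stands in for the 20 optimization steps in inference()
        # Dummy frame; in app.py this is the PIL image synthesized by G at step i.
        all_frames.append(Image.new('RGB', (256, 256), (12 * i, 0, 0)))

    writer = imageio.get_writer('test.mp4', fps=20)  # .mp4 selects the ffmpeg backend
    for im in all_frames:
        writer.append_data(np.array(im))  # PIL image -> HxWx3 uint8 array
    writer.close()  # finalize the container before handing the path to Gradio

One caveat of the diff as written: every request encodes to the fixed path 'test.mp4', so concurrent requests would overwrite one another's output; a per-request temporary file would avoid that.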
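
On the Gradio side, inference() now returns a (PIL image, video path) pair, so the single output component becomes a two-element list; "playable_video" is a string shortcut for a video output fed by a file path in the gradio 2.x API used here. A minimal wiring sketch with a stubbed model, again illustrative only:

    # Two-output Interface wiring (gradio 2.x API, model stubbed out).
    import gradio as gr
    from PIL import Image

    def inference(text):
        # Stub for the real model; app.py returns the last synthesized frame
        # plus the path of the mp4 that imageio just wrote.
        pil_image = Image.new('RGB', (256, 256))
        return pil_image, "test.mp4"  # 'test.mp4' must exist at request time

    gr.Interface(
        inference,
        "text",
        [gr.outputs.Image(type="pil", label="Output"), "playable_video"],
    ).launch()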