ucalyptus committed
Commit 06229a5
1 Parent(s): 78dcb0e
Files changed (4):
  1. app.py +11 -9
  2. configs/paths_config.py +4 -4
  3. requirements.txt +1 -2
  4. tune.py +6 -11
app.py CHANGED
@@ -1,20 +1,22 @@
  import os
  os.system("pip install gradio==2.4.6")
- import gradio as gr
- from PIL import Image
- import torch
+ os.system("pip install gdown lpips")
+ os.system("gdown --id 1HKmjg6iXsWr4aFPuU0gBXPGR83wqMzq7 -O align.dat")
+ os.system("wget https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/ffhq.pkl")
+ os.system("gdown https://github.com/ninja-build/ninja/releases/download/v1.10.2/ninja-linux.zip")
+ os.system("unzip -d /usr/local/bin/")
+ os.system("sudo update-alternatives --install /usr/bin/ninja ninja /usr/local/bin/ninja 1 --force")
+ os.mkdir("embeddings/")
 
 
- def greet(num):
-     return num+69
+ import gradio as gr
 
- iface = gr.Interface(fn=greet, inputs="number", outputs="number")
- iface.launch(share=True)
 
 
  def inference(img):
-     out = face2paint(model1, img)
-     return out
+     img.save("images/file.png")
+     os.system("python tune.py")
+     return
 
  title = "Pivotal Tuning for Latent Based Real Image Editing"
  description = "Gradio Demo for Pivotal Tuning Inversion. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below. Please use a cropped portrait picture for best results similar to the examples below."
configs/paths_config.py CHANGED
@@ -1,9 +1,9 @@
  ## Pretrained models paths
- e4e = './pretrained_models/e4e_ffhq_encode.pt'
- stylegan2_ada_ffhq = '/home/sayantan/PTI/pretrained_models/ffhq.pkl'
+ e4e = 'e4e_ffhq_encode.pt'
+ stylegan2_ada_ffhq = 'ffhq.pkl'
  style_clip_pretrained_mappers = ''
- ir_se50 = './pretrained_models/model_ir_se50.pth'
- dlib = './pretrained_models/align.dat'
+ ir_se50 = 'model_ir_se50.pth'
+ dlib = 'align.dat'
 
  ## Dirs for output files
  checkpoints_dir = './checkpoints'
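With these edits every pretrained-model path becomes a bare filename, so the files are resolved against the working directory that app.py downloads into (ffhq.pkl and align.dat in the hunk above). A small sanity check, not part of the commit, that could catch a missing file before tuning starts:

import os
from configs import paths_config

# Bare filenames resolve against the current working directory, which is
# where app.py fetches ffhq.pkl and align.dat at startup.
for path in (paths_config.e4e, paths_config.stylegan2_ada_ffhq, paths_config.dlib):
    if not os.path.isfile(path):
        raise FileNotFoundError("expected pretrained file in working directory: " + path)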
requirements.txt CHANGED
@@ -5,5 +5,4 @@ gdown
  numpy
  scipy
  cmake
- onnxruntime-gpu
- opencv-python-headless
+ onnxruntime-gpu
 
tune.py CHANGED
@@ -13,24 +13,19 @@ from scripts.latent_editor_wrapper import LatentEditorWrapper
  image_dir_name = 'images'
  use_multi_id_training = False
  global_config.device = 'cuda'
- paths_config.e4e = '/home/sayantan/PTI/pretrained_models/e4e_ffhq_encode.pt'
+ paths_config.e4e = 'e4e_ffhq_encode.pt'
  paths_config.input_data_id = image_dir_name
  paths_config.input_data_path = f'{image_dir_name}'
- paths_config.stylegan2_ada_ffhq = '/home/sayantan/PTI/pretrained_models/ffhq.pkl'
- paths_config.checkpoints_dir = '/home/sayantan/PTI/'
- paths_config.style_clip_pretrained_mappers = '/home/sayantan/PTI/pretrained_models'
+ paths_config.stylegan2_ada_ffhq = 'ffhq.pkl'
+ paths_config.checkpoints_dir = ''
+ paths_config.style_clip_pretrained_mappers = ''
  hyperparameters.use_locality_regularization = False
  hyperparameters.lpips_type = 'squeeze'
 
  from scripts.run_pti import run_PTI
 
- @click.command()
- @click.pass_context
- @click.option('--rname', prompt='wandb RUN NAME', help='The name to give for the wandb run')
-
- def tune(ctx: click.Context,rname):
-     runn = wandb.init(project='PTI', entity='masc', name = rname)
-     model_id = run_PTI(run_name='',use_wandb=True, use_multi_id_training=False)
+ def tune():
+     model_id = run_PTI(run_name='',use_wandb=False, use_multi_id_training=False)
 
  #----------------------------------------------------------------------------
  if __name__ == '__main__':
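The hunk ends at the if __name__ == '__main__': guard, so its body is not shown. With the click CLI and the wandb run removed, tune() takes no arguments, and the guard presumably just calls it directly, which is what lets app.py trigger tuning with a plain "python tune.py". A sketch of that presumed tail, assuming no further argument handling:

#----------------------------------------------------------------------------
if __name__ == '__main__':
    # Presumed entry point after the CLI removal: run PTI on everything
    # in the 'images' directory with the settings configured above.
    tune()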