kael558 committed
Commit 9ada1fb
1 parent: b6fa156
Files changed (4)
  1. .gitignore +7 -1
  2. __pycache__/sd.cpython-310.pyc +0 -0
  3. requirements.txt +3 -2
  4. sd.py +30 -7
.gitignore CHANGED
@@ -1 +1,7 @@
-venv/
+venv/
+AdaBins/
+k-diffusion/
+MiDaS/
+models/*
+pytorch3d-lite/
+stable-diffusion/
__pycache__/sd.cpython-310.pyc ADDED
Binary file (18 kB).
 
requirements.txt CHANGED
@@ -1,11 +1,11 @@
 --extra-index-url https://download.pytorch.org/whl/cu113
 torch==1.12.1+cu113
 torchvision==0.13.1+cu113
+torchtext==0.13.1
 omegaconf==2.2.3
 einops==0.4.1
 pytorch-lightning==1.7.4
 torchmetrics==0.9.3
-torchtext==0.13.1
 transformers==4.21.2
 kornia==0.6.7
 git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers
@@ -17,4 +17,5 @@ matplotlib
 resize-right
 timm
 torchdiffeq
-#opencv-python
+opencv-python
+scikit-image
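Note: torchtext==0.13.1 moves up next to the other torch pins; 0.13.x is the torchtext release paired with torch 1.12.x. A quick post-install sanity check:

# Quick check after `pip install -r requirements.txt` (assumption: the
# cu113 wheels resolved). torchtext 0.13.x pairs with torch 1.12.x, so
# these should print 1.12.1+cu113 and 0.13.1.
import torch
import torchtext

print(torch.__version__)      # expected: 1.12.1+cu113
print(torchtext.__version__)  # expected: 0.13.1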
sd.py CHANGED
@@ -16,12 +16,17 @@ from torch import autocast
 
 from PIL import Image
 import os
+import requests
 
-from huggingface_hub import snapshot_download
+#from huggingface_hub import snapshot_download
 from omegaconf import OmegaConf
 
 # 1. Download stable diffusion repository and set path
 os.system("git clone https://github.com/deforum/stable-diffusion")
+os.system("git clone https://github.com/shariqfarooq123/AdaBins.git")
+os.system("git clone https://github.com/isl-org/MiDaS.git")
+os.system("git clone https://github.com/MSFTserver/pytorch3d-lite.git")
+
 os.system("git clone https://github.com/deforum/k-diffusion/")
 with open('k-diffusion/k_diffusion/__init__.py', 'w') as f:
     f.write('')
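Note: each unconditional `os.system("git clone ...")` fails with "destination path already exists" on a second run. A minimal guard, sketched under the assumption that the clones keep their default directory names:

# Minimal sketch, assuming the script runs from the repo root and each
# clone lands in its default directory. Skips repos already on disk so
# re-runs do not hit "destination path already exists" errors from git.
import os

repos = [
    "https://github.com/deforum/stable-diffusion",
    "https://github.com/shariqfarooq123/AdaBins.git",
    "https://github.com/isl-org/MiDaS.git",
    "https://github.com/MSFTserver/pytorch3d-lite.git",
    "https://github.com/deforum/k-diffusion/",
]
for repo in repos:
    target = repo.rstrip("/").split("/")[-1].removesuffix(".git")
    if not os.path.isdir(target):
        os.system(f"git clone {repo}")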
@@ -31,6 +36,9 @@ sys.path.extend([
     'src/clip',
     'stable-diffusion/',
     'k-diffusion',
+    'pytorch3d-lite',
+    'AdaBins',
+    'MiDaS',
 ])
 from helpers import sampler_fn
 from k_diffusion.external import CompVisDenoiser
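Note: these `sys.path` entries are relative, so the imports only resolve when the script is launched from the checkout root. One way to pin them, assuming the cloned repos sit beside sd.py:

# Minimal sketch (assumption: the cloned repos live next to this file).
# Absolute paths keep the repos importable regardless of the working
# directory the script is launched from.
import os
import sys

ROOT = os.path.dirname(os.path.abspath(__file__))
sys.path.extend(
    os.path.join(ROOT, p)
    for p in ("src/clip", "stable-diffusion", "k-diffusion",
              "pytorch3d-lite", "AdaBins", "MiDaS")
)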
@@ -45,9 +53,9 @@ def load_model_from_config(config, ckpt, verbose=False, device='cuda', half_prec
     pl_sd = torch.load(ckpt, map_location=map_location)
     if "global_step" in pl_sd:
         print(f"Global Step: {pl_sd['global_step']}")
-    sd = pl_sd["state_dict"]
+    sd_var = pl_sd["state_dict"]
     model = instantiate_from_config(config.model)
-    m, u = model.load_state_dict(sd, strict=False)
+    m, u = model.load_state_dict(sd_var, strict=False)
     if len(m) > 0 and verbose:
         print("missing keys:")
         print(m)
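Note: the rename from `sd` to `sd_var` avoids shadowing other uses of the name `sd` in the module. For reference, `load_state_dict(..., strict=False)` returns the missing and unexpected key lists that the surrounding code prints; a self-contained toy illustration:

# Self-contained illustration (toy model, not the SD weights): with
# strict=False, load_state_dict returns the missing and unexpected key
# lists instead of raising on a mismatch.
import torch.nn as nn

model = nn.Linear(4, 4)
sd_var = {"weight": model.weight.data}  # deliberately omit "bias"
m, u = model.load_state_dict(sd_var, strict=False)
print(m)  # ['bias'] -- key absent from the supplied state dict
print(u)  # []       -- no extra keys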
@@ -62,16 +70,31 @@ def load_model_from_config(config, ckpt, verbose=False, device='cuda', half_prec
     model.eval()
     return model
 
+print('Model loading...')
+
+models_path = "models"
+model_checkpoint = "sd-v1-4.ckpt"
+ckpt_path = os.path.join(models_path, model_checkpoint)
+#ckpt_path = snapshot_download(repo_id="CompVis/stable-diffusion-v-1-4-original", filename="sd-v1-4.ckpt")
+
 ckpt_config_path = "./stable-diffusion/configs/stable-diffusion/v1-inference.yaml"
-ckpt_path = snapshot_download(repo_id="CompVis/stable-diffusion-v-1-4-original", filename="sd-v1-4.ckpt")
+
+if os.path.exists(ckpt_path):
+    print(f"{ckpt_path} exists")
+else:
+    print(f"Attempting to download {model_checkpoint}...this may take a while")
+    url = 'https://kael558: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@huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt'
+    ckpt_request = requests.get(url)
+
+    print('Model downloaded.')
+    with open(os.path.join(models_path, model_checkpoint), 'wb') as model_file:
+        model_file.write(ckpt_request.content)
 
 local_config = OmegaConf.load(f"{ckpt_config_path}")
 model = load_model_from_config(local_config, f"{ckpt_path}")
 device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
 model = model.to(device)
-
-
-
+print('Model saved.')
 
 class DeformAnimKeys():
     def __init__(self, anim_args):
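Note: this hunk has three caveats. `requests.get` without `stream=True` buffers the whole checkpoint (roughly 4 GB) in memory; the write assumes `models/` already exists, yet `models/*` is gitignored, so a fresh clone may lack the directory; and the URL embeds a personal access token in the source. A hedged alternative, assuming a valid read token in an `HF_TOKEN` environment variable:

# Minimal sketch (assumptions: HF_TOKEN env var is set; same checkpoint
# URL as the hunk above). Streams the download to disk in 1 MiB chunks
# instead of holding the whole file in memory, creates models/ if needed,
# and keeps credentials out of the source tree.
import os
import requests

url = ("https://huggingface.co/CompVis/stable-diffusion-v-1-4-original"
       "/resolve/main/sd-v1-4.ckpt")
headers = {"Authorization": f"Bearer {os.environ['HF_TOKEN']}"}

os.makedirs("models", exist_ok=True)
with requests.get(url, headers=headers, stream=True, timeout=60) as r:
    r.raise_for_status()  # fail fast on 401/403/404 instead of saving an error page
    with open(os.path.join("models", "sd-v1-4.ckpt"), "wb") as f:
        for chunk in r.iter_content(chunk_size=1 << 20):
            f.write(chunk)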