Songwei Ge committed on
Commit 2031452
1 Parent(s): 1ee21db
models/__pycache__/attention.cpython-38.pyc ADDED
Binary file (28.7 kB).

models/__pycache__/region_diffusion.cpython-38.pyc ADDED
Binary file (8.42 kB).

models/__pycache__/unet_2d_blocks.cpython-38.pyc ADDED
Binary file (27.8 kB).

models/__pycache__/unet_2d_condition.cpython-38.pyc ADDED
Binary file (11.4 kB).
models/region_diffusion.py CHANGED
@@ -15,41 +15,26 @@ class RegionDiffusion(nn.Module):
     def __init__(self, device):
         super().__init__()
 
-        try:
-            with open('./TOKEN', 'r') as f:
-                self.token = f.read().replace('\n', '') # remove the last \n!
-                print(f'[INFO] loaded hugging face access token from ./TOKEN!')
-        except FileNotFoundError as e:
-            self.token = True
-            print(f'[INFO] try to load hugging face access token from the default place, make sure you have run `huggingface-cli login`.')
-
         self.device = device
         self.num_train_timesteps = 1000
         self.clip_gradient = False
 
         print(f'[INFO] loading stable diffusion...')
-        local_pretrained_dir = "runwayml/stable-diffusion-v1-5"
-        if not os.path.isdir(local_pretrained_dir):
-            save_pretrained = True
-            load_paths = 'runwayml/stable-diffusion-v1-5'
-            os.makedirs(local_pretrained_dir, exist_ok=True)
-        else:
-            save_pretrained = False
-            load_paths = local_pretrained_dir
+        model_id = 'runwayml/stable-diffusion-v1-5'
 
         # 1. Load the autoencoder model which will be used to decode the latents into image space.
         self.vae = AutoencoderKL.from_pretrained(
-            load_paths, subfolder="vae", use_auth_token=self.token).to(self.device)
+            model_id, subfolder="vae").to(self.device)
 
         # 2. Load the tokenizer and text encoder to tokenize and encode the text.
         self.tokenizer = CLIPTokenizer.from_pretrained(
-            load_paths, subfolder='tokenizer', use_auth_token=self.token)
+            model_id, subfolder='tokenizer')
         self.text_encoder = CLIPTextModel.from_pretrained(
-            load_paths, subfolder='text_encoder', use_auth_token=self.token).to(self.device)
+            model_id, subfolder='text_encoder').to(self.device)
 
         # 3. The UNet model for generating the latents.
         self.unet = UNet2DConditionModel.from_pretrained(
-            load_paths, subfolder="unet", use_auth_token=self.token).to(self.device)
+            model_id, subfolder="unet").to(self.device)
 
         if save_pretrained:
             self.vae.save_pretrained(os.path.join(local_pretrained_dir, 'vae'))
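
The new code loads each Stable Diffusion component directly from the runwayml/stable-diffusion-v1-5 Hub repo by subfolder, relying on the credentials stored by `huggingface-cli login` instead of a local ./TOKEN file. Below is a minimal standalone sketch of that loading pattern, assuming diffusers, transformers, and torch are installed; the prompt, latent shape, and single UNet call at the end are illustrative additions, not part of this commit.

import torch
from diffusers import AutoencoderKL, UNet2DConditionModel
from transformers import CLIPTextModel, CLIPTokenizer

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model_id = 'runwayml/stable-diffusion-v1-5'

# Each component sits in its own subfolder of the Hub repo, so it can be
# loaded on its own instead of through the full StableDiffusionPipeline.
vae = AutoencoderKL.from_pretrained(model_id, subfolder='vae').to(device)
tokenizer = CLIPTokenizer.from_pretrained(model_id, subfolder='tokenizer')
text_encoder = CLIPTextModel.from_pretrained(model_id, subfolder='text_encoder').to(device)
unet = UNet2DConditionModel.from_pretrained(model_id, subfolder='unet').to(device)

# Illustrative sanity check (an assumption for this sketch): encode a prompt
# and run one UNet forward pass on random latents.
tokens = tokenizer('a photo of a cat', padding='max_length',
                   max_length=tokenizer.model_max_length, return_tensors='pt')
with torch.no_grad():
    text_embeddings = text_encoder(tokens.input_ids.to(device))[0]
    latents = torch.randn(1, unet.config.in_channels, 64, 64, device=device)
    noise_pred = unet(latents, 10, encoder_hidden_states=text_embeddings).sample
print(noise_pred.shape)  # expected: torch.Size([1, 4, 64, 64])

The removed use_auth_token argument was only needed to pass an explicit access token; after a `huggingface-cli login`, the stored token is picked up automatically by from_pretrained.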
utils/__pycache__/attention_utils.cpython-38.pyc ADDED
Binary file (5.25 kB).

utils/__pycache__/richtext_utils.cpython-38.pyc ADDED
Binary file (6.5 kB).