lemonaddie committed
Commit 4b257ce
Parent: 7b799c2

Update app.py

Files changed (1)
  1. app.py  +11  -4
app.py CHANGED
@@ -45,10 +45,17 @@ from torchvision.transforms import InterpolationMode
 
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
-vae = AutoencoderKL.from_pretrained('.', subfolder='vae')
-scheduler = DDIMScheduler.from_pretrained('.', subfolder='scheduler')
-image_encoder = CLIPVisionModelWithProjection.from_pretrained('.', subfolder="image_encoder")
-feature_extractor = CLIPImageProcessor.from_pretrained('.', subfolder="feature_extractor")
+# vae = AutoencoderKL.from_pretrained('.', subfolder='vae')
+# scheduler = DDIMScheduler.from_pretrained('.', subfolder='scheduler')
+# image_encoder = CLIPVisionModelWithProjection.from_pretrained('.', subfolder="image_encoder")
+# feature_extractor = CLIPImageProcessor.from_pretrained('.', subfolder="feature_extractor")
+
+stable_diffusion_repo_path = "stabilityai/stable-diffusion-2-1-unclip"
+vae = AutoencoderKL.from_pretrained(stable_diffusion_repo_path, subfolder='vae')
+scheduler = DDIMScheduler.from_pretrained(stable_diffusion_repo_path, subfolder='scheduler')
+sd_image_variations_diffusers_path = 'lambdalabs/sd-image-variations-diffusers'
+image_encoder = CLIPVisionModelWithProjection.from_pretrained(sd_image_variations_diffusers_path, subfolder="image_encoder")
+feature_extractor = CLIPImageProcessor.from_pretrained(sd_image_variations_diffusers_path, subfolder="feature_extractor")
 unet = UNet2DConditionModel.from_pretrained('.', subfolder="unet7000")
 
 pipe = DepthNormalEstimationPipeline(vae=vae,
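
For reference, a minimal standalone sketch of the loading path after this commit, assuming the two Hub repos named above are reachable and the fine-tuned UNet weights still ship under unet7000/ in the Space's own repository. The DepthNormalEstimationPipeline call is truncated in the diff, so its remaining arguments (and the move to the device) are not reproduced here.

    from diffusers import AutoencoderKL, DDIMScheduler, UNet2DConditionModel
    from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

    # VAE and DDIM scheduler now come from the public SD 2.1 unCLIP repo.
    stable_diffusion_repo_path = "stabilityai/stable-diffusion-2-1-unclip"
    vae = AutoencoderKL.from_pretrained(stable_diffusion_repo_path, subfolder="vae")
    scheduler = DDIMScheduler.from_pretrained(stable_diffusion_repo_path, subfolder="scheduler")

    # CLIP image encoder and its preprocessor come from the image-variations repo.
    sd_image_variations_diffusers_path = "lambdalabs/sd-image-variations-diffusers"
    image_encoder = CLIPVisionModelWithProjection.from_pretrained(
        sd_image_variations_diffusers_path, subfolder="image_encoder"
    )
    feature_extractor = CLIPImageProcessor.from_pretrained(
        sd_image_variations_diffusers_path, subfolder="feature_extractor"
    )

    # The fine-tuned UNet is still loaded from the local repo root, as before the commit.
    unet = UNet2DConditionModel.from_pretrained(".", subfolder="unet7000")

The practical effect of the change is that only the task-specific UNet has to be stored with the Space; the generic sub-models are downloaded from the Hub at startup instead of being read from local subfolders.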