lemonaddie committed on
Commit
b854d16
1 Parent(s): e343493

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -6
app.py CHANGED
@@ -45,12 +45,10 @@ from torchvision.transforms import InterpolationMode
45
 
46
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
47
 
48
- stable_diffusion_repo_path = "stabilityai/stable-diffusion-2-1-unclip"
49
- vae = AutoencoderKL.from_pretrained(stable_diffusion_repo_path, subfolder='vae')
50
- scheduler = DDIMScheduler.from_pretrained(stable_diffusion_repo_path, subfolder='scheduler')
51
- sd_image_variations_diffusers_path = 'lambdalabs/sd-image-variations-diffusers'
52
- image_encoder = CLIPVisionModelWithProjection.from_pretrained(sd_image_variations_diffusers_path, subfolder="image_encoder")
53
- feature_extractor = CLIPImageProcessor.from_pretrained(sd_image_variations_diffusers_path, subfolder="feature_extractor")
54
  unet = UNet2DConditionModel.from_pretrained('.', subfolder="unet")
55
 
56
  pipe = DepthNormalEstimationPipeline(vae=vae,
 
45
 
46
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
47
 
48
+ vae = AutoencoderKL.from_pretrained("./", subfolder='vae')
49
+ scheduler = DDIMScheduler.from_pretrained("./", subfolder='scheduler')
50
+ image_encoder = CLIPVisionModelWithProjection.from_pretrained("./", subfolder="image_encoder")
51
+ feature_extractor = CLIPImageProcessor.from_pretrained("./", subfolder="feature_extractor")
 
 
52
  unet = UNet2DConditionModel.from_pretrained('.', subfolder="unet")
53
 
54
  pipe = DepthNormalEstimationPipeline(vae=vae,