multimodalart HF staff committed on
Commit
47a017a
1 Parent(s): 4ac3513

Remove stuff from dev env

Browse files
Files changed (1) hide show
  1. app.py +3 -4
app.py CHANGED
@@ -9,15 +9,14 @@ from constants import *
9
  from inversion_utils import *
10
  from modified_pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
11
  from torch import autocast, inference_mode
12
- from diffusers import DiffusionPipeline
13
  from diffusers import DDIMScheduler
14
  from transformers import AutoProcessor, BlipForConditionalGeneration
15
 
16
- torch.cuda.empty_cache()
17
  # load pipelines
18
  sd_model_id = "stabilityai/stable-diffusion-2-1-base"
19
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
20
- sd_pipe = DiffusionPipeline.from_pretrained(sd_model_id).to(device)
21
  sd_pipe.scheduler = DDIMScheduler.from_config(sd_model_id, subfolder = "scheduler")
22
  sem_pipe = SemanticStableDiffusionPipeline.from_pretrained(sd_model_id).to(device)
23
  blip_processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
@@ -792,4 +791,4 @@ with gr.Blocks(css="style.css") as demo:
792
 
793
 
794
  demo.queue()
795
- demo.launch(share=True)
 
9
  from inversion_utils import *
10
  from modified_pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
11
  from torch import autocast, inference_mode
12
+ from diffusers import StableDiffusionPipeline
13
  from diffusers import DDIMScheduler
14
  from transformers import AutoProcessor, BlipForConditionalGeneration
15
 
 
16
  # load pipelines
17
  sd_model_id = "stabilityai/stable-diffusion-2-1-base"
18
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
19
+ sd_pipe = StableDiffusionPipeline.from_pretrained(sd_model_id).to(device)
20
  sd_pipe.scheduler = DDIMScheduler.from_config(sd_model_id, subfolder = "scheduler")
21
  sem_pipe = SemanticStableDiffusionPipeline.from_pretrained(sd_model_id).to(device)
22
  blip_processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
 
791
 
792
 
793
  demo.queue()
794
+ demo.launch()