mokady committed on
Commit 9be6105 (1 parent: 799c48f)

Update app.py

Files changed (1): app.py (+28, -28)
app.py CHANGED
@@ -14,8 +14,8 @@ resolutions = ["1024 1024","1280 768","1344 768","768 1344","768 1280" ]
 # Load pipeline
 
 vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
-unet = UNet2DConditionModel.from_pretrained("briaai/BRIA-2.2-FAST", torch_dtype=torch.float16)
-pipe = DiffusionPipeline.from_pretrained("briaai/BRIA-2.2", torch_dtype=torch.float16, unet=unet, vae=vae)
+unet = UNet2DConditionModel.from_pretrained("briaai/BRIA-2.3-FAST", torch_dtype=torch.float16)
+pipe = DiffusionPipeline.from_pretrained("briaai/BRIA-2.3-BETA", torch_dtype=torch.float16, unet=unet, vae=vae)
 pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
 pipe.to('cuda')
 del unet
@@ -24,26 +24,26 @@ del vae
 
 pipe.force_zeros_for_empty_prompt = False
 
-print("Optimizing BRIA 2.2 FAST - this could take a while")
-t=time.time()
-pipe.unet = torch.compile(
-    pipe.unet, mode="reduce-overhead", fullgraph=True # 600 secs compilation
-)
-with torch.no_grad():
-    outputs = pipe(
-        prompt="an apple",
-        num_inference_steps=8,
-    )
-
-# This will avoid future compilations on different shapes
-unet_compiled = torch._dynamo.run(pipe.unet)
-unet_compiled.config=pipe.unet.config
-unet_compiled.add_embedding = Dummy()
-unet_compiled.add_embedding.linear_1 = Dummy()
-unet_compiled.add_embedding.linear_1.in_features = pipe.unet.add_embedding.linear_1.in_features
-pipe.unet = unet_compiled
-
-print(f"Optimizing finished successfully after {time.time()-t} secs")
+# print("Optimizing BRIA 2.3 FAST - this could take a while")
+# t=time.time()
+# pipe.unet = torch.compile(
+#     pipe.unet, mode="reduce-overhead", fullgraph=True # 600 secs compilation
+# )
+# with torch.no_grad():
+#     outputs = pipe(
+#         prompt="an apple",
+#         num_inference_steps=8,
+#     )
+
+# # This will avoid future compilations on different shapes
+# unet_compiled = torch._dynamo.run(pipe.unet)
+# unet_compiled.config=pipe.unet.config
+# unet_compiled.add_embedding = Dummy()
+# unet_compiled.add_embedding.linear_1 = Dummy()
+# unet_compiled.add_embedding.linear_1.in_features = pipe.unet.add_embedding.linear_1.in_features
+# pipe.unet = unet_compiled
+
+# print(f"Optimizing finished successfully after {time.time()-t} secs")
 
 @spaces.GPU(enable_queue=True)
 def infer(prompt,seed,resolution):
@@ -84,16 +84,16 @@ css = """
 """
 with gr.Blocks(css=css) as demo:
     with gr.Column(elem_id="col-container"):
-        gr.Markdown("## BRIA 2.2 FAST")
+        gr.Markdown("## BRIA 2.3 FAST")
         gr.HTML('''
        <p style="margin-bottom: 10px; font-size: 94%">
        This is a demo for
-       <a href="https://huggingface.co/briaai/BRIA-2.2-FAST" target="_blank">BRIA 2.2 FAST </a>.
-       This is a fast version of BRIA 2.2 text-to-image model, still trained on licensed data, and so provides full legal liability coverage for copyright and privacy infringement.
-       Try it for free in our webapp demo <a href="https://labs.bria.ai/" </a>.
+       <a href="https://huggingface.co/briaai/BRIA-2.3-FAST" target="_blank">BRIA 2.3 FAST </a>.
+       This is a fast version of the BRIA 2.3 text-to-image model, still trained on licensed data, and so provides full legal liability coverage for copyright and privacy infringement.
+       You can also try it for free in our <a href="https://labs.bria.ai/" target="_blank">webapp demo </a>.
 
-       Are you a startup or a student? We encourage you to apply for our Startup Plan
-       <a href="https://pages.bria.ai/the-visual-generative-ai-platform-for-builders-startups-plan?_gl=1*cqrl81*_ga*MTIxMDI2NzI5OC4xNjk5NTQ3MDAz*_ga_WRN60H46X4*MTcwOTM5OTMzNC4yNzguMC4xNzA5Mzk5MzM0LjYwLjAuMA..) </a>
+       Are you a startup or a student? We encourage you to apply for our
+       <a href="https://pages.bria.ai/the-visual-generative-ai-platform-for-builders-startups-plan?_gl=1*cqrl81*_ga*MTIxMDI2NzI5OC4xNjk5NTQ3MDAz*_ga_WRN60H46X4*MTcwOTM5OTMzNC4yNzguMC4xNzA5Mzk5MzM0LjYwLjAuMA.." target="_blank">Startup Plan</a>.
        This program is designed to support emerging businesses and academic pursuits with our cutting-edge technology.
 
        </p>
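
For reference, a minimal standalone sketch of how the pipeline configured by this commit can be loaded and run outside the Gradio app. The model IDs, VAE, LCMScheduler, `force_zeros_for_empty_prompt`, and the 8-step call are taken from the diff above; the prompt, the seeded generator, and the image saving are illustrative assumptions and not part of app.py.

```python
# Sketch of the post-commit pipeline setup (model IDs, VAE, scheduler, and
# num_inference_steps=8 come from the diff; prompt/seed/output are assumptions).
import torch
from diffusers import AutoencoderKL, DiffusionPipeline, LCMScheduler, UNet2DConditionModel

vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
unet = UNet2DConditionModel.from_pretrained("briaai/BRIA-2.3-FAST", torch_dtype=torch.float16)
pipe = DiffusionPipeline.from_pretrained(
    "briaai/BRIA-2.3-BETA", torch_dtype=torch.float16, unet=unet, vae=vae
)
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
pipe.to("cuda")
pipe.force_zeros_for_empty_prompt = False  # as set in app.py

# One 8-step generation, matching the step count app.py used in its warm-up call.
generator = torch.Generator("cuda").manual_seed(0)  # assumed seed for reproducibility
image = pipe(
    prompt="a photo of a red apple on a wooden table",  # assumed prompt
    num_inference_steps=8,
    generator=generator,
).images[0]
image.save("apple.png")
```

Note that the torch.compile warm-up this commit comments out traded a one-time compilation of roughly 600 seconds (per the inline comment, with mode="reduce-overhead" and fullgraph=True, followed by torch._dynamo.run to avoid recompiling on new shapes) for faster per-step UNet execution; the sketch above runs the UNet eagerly, as the updated app.py now does.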