mokady commited on
Commit
15b4028
1 Parent(s): 6289ea6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +25 -26
app.py CHANGED
@@ -24,26 +24,26 @@ del vae
24
 
25
  pipe.force_zeros_for_empty_prompt = False
26
 
27
- print("Optimizing BRIA 2.2 FAST - this could take a while")
28
- t=time.time()
29
- pipe.unet = torch.compile(
30
- pipe.unet, mode="reduce-overhead", fullgraph=True # 600 secs compilation
31
- )
32
- with torch.no_grad():
33
- outputs = pipe(
34
- prompt="an apple",
35
- num_inference_steps=8,
36
- )
37
-
38
- # This will avoid future compilations on different shapes
39
- unet_compiled = torch._dynamo.run(pipe.unet)
40
- unet_compiled.config=pipe.unet.config
41
- unet_compiled.add_embedding = Dummy()
42
- unet_compiled.add_embedding.linear_1 = Dummy()
43
- unet_compiled.add_embedding.linear_1.in_features = pipe.unet.add_embedding.linear_1.in_features
44
- pipe.unet = unet_compiled
45
-
46
- print(f"Optimizing finished successfully after {time.time()-t} secs")
47
 
48
  @spaces.GPU(enable_queue=True)
49
  def infer(prompt,seed,resolution):
@@ -89,13 +89,12 @@ with gr.Blocks(css=css) as demo:
89
  <p style="margin-bottom: 10px; font-size: 94%">
90
  This is a demo for
91
  <a href="https://huggingface.co/briaai/BRIA-2.2-FAST" target="_blank">BRIA 2.2 FAST </a>.
92
- This is a fast version of BRIA 2.2 text-to-image model, still trained on licensed data, and so provides full legal liability coverage for copyright and privacy infringement.
93
- Try it for free in our webapp demo <a href="https://labs.bria.ai/" </a>.
94
-
95
- Are you a startup or a student? We encourage you to apply for our Startup Plan
96
- <a href="https://pages.bria.ai/the-visual-generative-ai-platform-for-builders-startups-plan?_gl=1*cqrl81*_ga*MTIxMDI2NzI5OC4xNjk5NTQ3MDAz*_ga_WRN60H46X4*MTcwOTM5OTMzNC4yNzguMC4xNzA5Mzk5MzM0LjYwLjAuMA..) </a>
97
  This program is designed to support emerging businesses and academic pursuits with our cutting-edge technology.
98
-
99
  </p>
100
  ''')
101
  with gr.Group():
 
24
 
25
  pipe.force_zeros_for_empty_prompt = False
26
 
27
+ # print("Optimizing BRIA 2.2 FAST - this could take a while")
28
+ # t=time.time()
29
+ # pipe.unet = torch.compile(
30
+ # pipe.unet, mode="reduce-overhead", fullgraph=True # 600 secs compilation
31
+ # )
32
+ # with torch.no_grad():
33
+ # outputs = pipe(
34
+ # prompt="an apple",
35
+ # num_inference_steps=8,
36
+ # )
37
+
38
+ # # This will avoid future compilations on different shapes
39
+ # unet_compiled = torch._dynamo.run(pipe.unet)
40
+ # unet_compiled.config=pipe.unet.config
41
+ # unet_compiled.add_embedding = Dummy()
42
+ # unet_compiled.add_embedding.linear_1 = Dummy()
43
+ # unet_compiled.add_embedding.linear_1.in_features = pipe.unet.add_embedding.linear_1.in_features
44
+ # pipe.unet = unet_compiled
45
+
46
+ # print(f"Optimizing finished successfully after {time.time()-t} secs")
47
 
48
  @spaces.GPU(enable_queue=True)
49
  def infer(prompt,seed,resolution):
 
89
  <p style="margin-bottom: 10px; font-size: 94%">
90
  This is a demo for
91
  <a href="https://huggingface.co/briaai/BRIA-2.2-FAST" target="_blank">BRIA 2.2 FAST </a>.
92
+ This is a fast version of the BRIA 2.2 text-to-image model, still trained on licensed data, and so provides full legal liability coverage for copyright and privacy infringement.
93
+ Running time is 1.6s on A10 GPU. A demo for the newer version of BRIA-2.3 is also available <a href="https://huggingface.co/spaces/briaai/BRIA-2.3" target="_blank">HERE </a>.
94
+ You can also try it for free in our <a href="https://labs.bria.ai/" target="_blank">webapp demo </a>.
95
+ Are you a startup or a student? We encourage you to apply for our
96
+ <a href="https://pages.bria.ai/the-visual-generative-ai-platform-for-builders-startups-plan?_gl=1*cqrl81*_ga*MTIxMDI2NzI5OC4xNjk5NTQ3MDAz*_ga_WRN60H46X4*MTcwOTM5OTMzNC4yNzguMC4xNzA5Mzk5MzM0LjYwLjAuMA.." target="_blank">Startup Plan</a>.
97
  This program is designed to support emerging businesses and academic pursuits with our cutting-edge technology.
 
98
  </p>
99
  ''')
100
  with gr.Group():