Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -11,11 +11,7 @@ class Dummy():
 
 resolutions = ["1024 1024","1280 768","1344 768","768 1344","768 1280"]
 
-# Ng
-default_negative_prompt= "Logo,Watermark,Text,Ugly,Morbid,Extra fingers,Poorly drawn hands,Mutation,Blurry,Extra limbs,Gross proportions,Missing arms,Mutated hands,Long neck,Duplicate,Mutilated,Mutilated hands,Poorly drawn face,Deformed,Bad anatomy,Cloned face,Malformed limbs,Missing legs,Too many fingers"
-
 # Load pipeline
-model_id = "briaai/BRIA-2.2"
 scheduler = EulerAncestralDiscreteScheduler(
     beta_start=0.00085,
     beta_end=0.012,
@@ -23,10 +19,12 @@ scheduler = EulerAncestralDiscreteScheduler(
     num_train_timesteps=1000,
     steps_offset=1
 )
-
+unet = UNet2DConditionModel.from_pretrained("briaai/BRIA-LCM-2.2", torch_dtype=torch.float16)
+pipe = DiffusionPipeline.from_pretrained("briaai/BRIA-2.2", unet=unet, torch_dtype=torch.float16)
+pipe = pipe.to("cuda")
 pipe.force_zeros_for_empty_prompt = False
 
-print("Optimizing BRIA-2.2 - this could take a while")
+print("Optimizing BRIA-LCM-2.2 - this could take a while")
 t=time.time()
 pipe.unet = torch.compile(
     pipe.unet, mode="reduce-overhead", fullgraph=True # 600 secs compilation
@@ -68,7 +66,7 @@ def infer(prompt,negative_prompt,seed,resolution):
 
     w,h = resolution.split()
     w,h = int(w),int(h)
-    image = pipe(prompt,num_inference_steps=
+    image = pipe(prompt,num_inference_steps=8,generator=generator,width=w,height=h).images[0]
     print(f'gen time is {time.time()-t} secs')
 
     # Future
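
Pieced together, the new loading path looks roughly like the sketch below; the negative-prompt defaults are dropped, presumably because LCM-style few-step sampling runs at low guidance, where a negative prompt contributes little. The imports never appear in the hunks above, so the torch/diffusers lines here are assumptions; the repo ids and dtype come straight from the diff.

import torch
from diffusers import DiffusionPipeline, UNet2DConditionModel

# Load the LCM-distilled UNet on its own, then swap it into the base
# BRIA-2.2 pipeline so every other component stays unchanged.
unet = UNet2DConditionModel.from_pretrained("briaai/BRIA-LCM-2.2", torch_dtype=torch.float16)
pipe = DiffusionPipeline.from_pretrained("briaai/BRIA-2.2", unet=unet, torch_dtype=torch.float16)
pipe = pipe.to("cuda")
pipe.force_zeros_for_empty_prompt = False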
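
One caveat with torch.compile(..., mode="reduce-overhead", fullgraph=True): compilation is lazy, so the roughly 600-second cost flagged in the diff's comment is paid on the first forward pass, and each new resolution can trigger a fresh compile. A startup warm-up pass, sketched below, is one way to keep that cost out of the first user request; it is an assumption here, not something the commit does.

# Hypothetical warm-up: run one dummy generation per supported resolution
# so compilation happens before real traffic arrives.
for res in resolutions:
    w, h = (int(x) for x in res.split())
    _ = pipe("warm-up", num_inference_steps=8, width=w, height=h)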
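
For context, the updated infer body ends up roughly as below. generator is referenced but never defined in the hunks shown, so the seeded torch.Generator is an assumption, as is the return; the 8-step call itself is taken from the diff. negative_prompt stays in the signature but is no longer passed to the pipeline, consistent with removing default_negative_prompt above.

import time

def infer(prompt, negative_prompt, seed, resolution):
    t = time.time()
    generator = torch.Generator("cuda").manual_seed(int(seed))  # assumed seeding
    w, h = resolution.split()
    w, h = int(w), int(h)
    # The LCM-distilled UNet converges in far fewer denoising steps; the commit uses 8.
    image = pipe(prompt, num_inference_steps=8, generator=generator, width=w, height=h).images[0]
    print(f'gen time is {time.time()-t} secs')
    return image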