Update app.py
app.py CHANGED
@@ -90,7 +90,6 @@ def load_and_prepare_model():
     #sched = DPMSolverSDEScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler')
     #sched = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler',beta_schedule="scaled_linear", token=True) #, beta_start=0.00085, beta_end=0.012, steps_offset=1,use_karras_sigmas=True, token=True)
     #sched = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler',beta_schedule="scaled_linear")
-
     pipe = StableDiffusionXLPipeline.from_pretrained(
         'ford442/RealVisXL_V5.0_BF16',
         #torch_dtype=torch.bfloat16,
@@ -119,6 +118,9 @@ def load_and_prepare_model():
 
 pipe = load_and_prepare_model()
 
+text_encoder=CLIPTextModel.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='text_encoder',token=True).to(device)
+text_encoder_2=CLIPTextModelWithProjection.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='text_encoder_2',token=True).to(device)
+
 MAX_SEED = np.iinfo(np.int32).max
 
 neg_prompt_2 = " 'non-photorealistic':1.5, 'unrealistic skin','unattractive face':1.3, 'low quality':1.1, ('dull color scheme', 'dull colors', 'digital noise':1.2),'amateurish', 'poorly drawn face':1.3, 'poorly drawn', 'distorted face', 'low resolution', 'simplistic' "
@@ -182,8 +184,8 @@ def generate_30(
 ):
     seed = random.randint(0, MAX_SEED)
     generator = torch.Generator(device='cuda').manual_seed(seed)
-    pipe.text_encoder=
-    pipe.text_encoder_2=
+    pipe.text_encoder=text_encoder
+    pipe.text_encoder_2=text_encoder_2
     options = {
         "prompt": [prompt],
         "negative_prompt": [negative_prompt],
@@ -224,8 +226,8 @@ def generate_60(
 ):
     seed = random.randint(0, MAX_SEED)
     generator = torch.Generator(device='cuda').manual_seed(seed)
-    pipe.text_encoder=
-    pipe.text_encoder_2=
+    pipe.text_encoder=text_encoder
+    pipe.text_encoder_2=text_encoder_2
     options = {
         "prompt": [prompt],
         "negative_prompt": [negative_prompt],
@@ -266,8 +268,8 @@ def generate_90(
 ):
     seed = random.randint(0, MAX_SEED)
     generator = torch.Generator(device='cuda').manual_seed(seed)
-    pipe.text_encoder=
-    pipe.text_encoder_2=
+    pipe.text_encoder=text_encoder
+    pipe.text_encoder_2=text_encoder_2
     options = {
         "prompt": [prompt],
         "negative_prompt": [negative_prompt],
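For reference, a minimal sketch of the pattern this diff applies: load the two SDXL CLIP text encoders once at module scope, then attach them to the pipeline just before inference. The repo id, subfolders, token use, and device come from the diff; the generate() wrapper, its parameters, and the pipeline call are hypothetical stand-ins for the app's generate_30/60/90 handlers and the rest of app.py, which is omitted here.

# Sketch only; assumes a CUDA device and authenticated access to the
# 'ford442/RealVisXL_V5.0_BF16' repo, as in the diff above.
import torch
from diffusers import StableDiffusionXLPipeline
from transformers import CLIPTextModel, CLIPTextModelWithProjection

device = torch.device('cuda')

# Stand-in for load_and_prepare_model(): build the SDXL pipeline once.
pipe = StableDiffusionXLPipeline.from_pretrained('ford442/RealVisXL_V5.0_BF16').to(device)

# Load both text encoders once at module scope, as the added lines do.
text_encoder = CLIPTextModel.from_pretrained(
    'ford442/RealVisXL_V5.0_BF16', subfolder='text_encoder', token=True).to(device)
text_encoder_2 = CLIPTextModelWithProjection.from_pretrained(
    'ford442/RealVisXL_V5.0_BF16', subfolder='text_encoder_2', token=True).to(device)

def generate(prompt: str, negative_prompt: str, steps: int = 30):
    # Attach the preloaded encoders right before inference, mirroring the
    # edits to generate_30 / generate_60 / generate_90.
    pipe.text_encoder = text_encoder
    pipe.text_encoder_2 = text_encoder_2
    generator = torch.Generator(device='cuda').manual_seed(0)
    return pipe(prompt=[prompt], negative_prompt=[negative_prompt],
                num_inference_steps=steps, generator=generator).images[0]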