pseudotheos committed on
Commit
3a4f72c
·
1 Parent(s): 0c421ed

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +77 -47
app.py CHANGED
@@ -2,6 +2,7 @@ import os
2
  import io
3
  import socket
4
  import requests
 
5
  from fastapi import FastAPI, File, UploadFile, Form
6
  from fastapi.responses import FileResponse
7
  from PIL import Image
@@ -21,6 +22,19 @@ import random
21
  import time
22
  import tempfile
23
 
 
 
 
 
 
 
 
 
 
 
 
 
 
24
  app = FastAPI()
25
 
26
  BASE_MODEL = "SG161222/Realistic_Vision_V5.1_noVAE"
@@ -94,7 +108,6 @@ def convert_to_base64(pil_image):
94
  base64_image = processing_utils.encode_pil_to_base64(pil_image)
95
  return base64_image
96
 
97
- # Inference function
98
  def inference(
99
  control_image: Image.Image,
100
  prompt: str,
@@ -108,56 +121,73 @@ def inference(
108
  sampler = "DPM++ Karras SDE",
109
  #profile: gr.OAuthProfile | None = None,
110
  ):
111
- start_time = time.time()
112
- start_time_struct = time.localtime(start_time)
113
- start_time_formatted = time.strftime("%H:%M:%S", start_time_struct)
114
- print(f"Inference started at {start_time_formatted}")
115
-
116
- # Generate the initial image
117
- #init_image = init_pipe(prompt).images[0]
118
 
119
- # Rest of your existing code
120
- control_image_small = center_crop_resize(control_image)
121
- control_image_large = center_crop_resize(control_image, (1024, 1024))
 
 
 
 
 
 
 
 
 
 
122
 
123
- main_pipe.scheduler = SAMPLER_MAP[sampler](main_pipe.scheduler.config)
124
- my_seed = random.randint(0, 2**32 - 1) if seed == -1 else seed
125
- generator = torch.Generator(device="cuda").manual_seed(my_seed)
126
 
127
- out = main_pipe(
128
- prompt=prompt,
129
- negative_prompt=negative_prompt,
130
- image=control_image_small,
131
- guidance_scale=float(guidance_scale),
132
- controlnet_conditioning_scale=float(controlnet_conditioning_scale),
133
- generator=generator,
134
- control_guidance_start=float(control_guidance_start),
135
- control_guidance_end=float(control_guidance_end),
136
- num_inference_steps=15,
137
- output_type="latent"
138
- )
139
- upscaled_latents = upscale(out, "nearest-exact", 2)
140
- out_image = image_pipe(
141
- prompt=prompt,
142
- negative_prompt=negative_prompt,
143
- control_image=control_image_large,
144
- image=upscaled_latents,
145
- guidance_scale=float(guidance_scale),
146
- generator=generator,
147
- num_inference_steps=20,
148
- strength=upscaler_strength,
149
- control_guidance_start=float(control_guidance_start),
150
- control_guidance_end=float(control_guidance_end),
151
- controlnet_conditioning_scale=float(controlnet_conditioning_scale)
152
- )
153
- end_time = time.time()
154
- end_time_struct = time.localtime(end_time)
155
- end_time_formatted = time.strftime("%H:%M:%S", end_time_struct)
156
- print(f"Inference ended at {end_time_formatted}, taking {end_time-start_time}s")
157
-
158
- return out_image["images"][0]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
159
 
160
- import os
161
 
162
  def generate_image_from_parameters(prompt, guidance_scale, controlnet_scale, controlnet_end, upscaler_strength, seed, sampler_type, image):
163
  try:
 
2
  import io
3
  import socket
4
  import requests
5
+ import logging
6
  from fastapi import FastAPI, File, UploadFile, Form
7
  from fastapi.responses import FileResponse
8
  from PIL import Image
 
22
  import time
23
  import tempfile
24
 
25
import sys

# Module-level logger for the app: emits DEBUG and above to both a log
# file (inference.log) and stdout, using one timestamped format.
logger = logging.getLogger(__name__)
# Set the logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
logger.setLevel(logging.DEBUG)

file_handler = logging.FileHandler('inference.log')
stream_handler = logging.StreamHandler(sys.stdout)

formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# Apply the formatter to BOTH handlers. The original only formatted the
# stream handler, so file records were written with the bare default format.
file_handler.setFormatter(formatter)
stream_handler.setFormatter(formatter)

# Add the file handler and stream handler to the logger
logger.addHandler(file_handler)
logger.addHandler(stream_handler)
37
+
38
  app = FastAPI()
39
 
40
  BASE_MODEL = "SG161222/Realistic_Vision_V5.1_noVAE"
 
108
  base64_image = processing_utils.encode_pil_to_base64(pil_image)
109
  return base64_image
110
 
 
111
  def inference(
112
  control_image: Image.Image,
113
  prompt: str,
 
121
  sampler = "DPM++ Karras SDE",
122
  #profile: gr.OAuthProfile | None = None,
123
  ):
 
 
 
 
 
 
 
124
 
125
+ try:
126
+ # Log input types and values
127
+ logger.debug("Input Types: control_image=%s, prompt=%s, negative_prompt=%s, guidance_scale=%s, controlnet_conditioning_scale=%s, control_guidance_start=%s, control_guidance_end=%s, upscaler_strength=%s, seed=%s, sampler=%s",
128
+ type(control_image), type(prompt), type(negative_prompt), type(guidance_scale), type(controlnet_conditioning_scale),
129
+ type(control_guidance_start), type(control_guidance_end), type(upscaler_strength), type(seed), type(sampler))
130
+ logger.debug("Input Values: control_image=%s, prompt=%s, negative_prompt=%s, guidance_scale=%s, controlnet_conditioning_scale=%s, control_guidance_start=%s, control_guidance_end=%s, upscaler_strength=%s, seed=%s, sampler=%s",
131
+ control_image, prompt, negative_prompt, guidance_scale, controlnet_conditioning_scale,
132
+ control_guidance_start, control_guidance_end, upscaler_strength, seed, sampler)
133
+
134
+ start_time = time.time()
135
+ start_time_struct = time.localtime(start_time)
136
+ start_time_formatted = time.strftime("%H:%M:%S", start_time_struct)
137
+ logger.info(f"Inference started at {start_time_formatted}")
138
 
 
 
 
139
 
140
+ # Generate the initial image
141
+ #init_image = init_pipe(prompt).images[0]
142
+
143
+ # Rest of your existing code
144
+ control_image_small = center_crop_resize(control_image)
145
+ control_image_large = center_crop_resize(control_image, (1024, 1024))
146
+
147
+ main_pipe.scheduler = SAMPLER_MAP[sampler](main_pipe.scheduler.config)
148
+ my_seed = random.randint(0, 2**32 - 1) if seed == -1 else seed
149
+ generator = torch.Generator(device="cuda").manual_seed(my_seed)
150
+
151
+ out = main_pipe(
152
+ prompt=prompt,
153
+ negative_prompt=negative_prompt,
154
+ image=control_image_small,
155
+ guidance_scale=float(guidance_scale),
156
+ controlnet_conditioning_scale=float(controlnet_conditioning_scale),
157
+ generator=generator,
158
+ control_guidance_start=float(control_guidance_start),
159
+ control_guidance_end=float(control_guidance_end),
160
+ num_inference_steps=15,
161
+ output_type="latent"
162
+ )
163
+ upscaled_latents = upscale(out, "nearest-exact", 2)
164
+ out_image = image_pipe(
165
+ prompt=prompt,
166
+ negative_prompt=negative_prompt,
167
+ control_image=control_image_large,
168
+ image=upscaled_latents,
169
+ guidance_scale=float(guidance_scale),
170
+ generator=generator,
171
+ num_inference_steps=20,
172
+ strength=upscaler_strength,
173
+ control_guidance_start=float(control_guidance_start),
174
+ control_guidance_end=float(control_guidance_end),
175
+ controlnet_conditioning_scale=float(controlnet_conditioning_scale)
176
+ )
177
+ end_time = time.time()
178
+ end_time_struct = time.localtime(end_time)
179
+ end_time_formatted = time.strftime("%H:%M:%S", end_time_struct)
180
+ print(f"Inference ended at {end_time_formatted}, taking {end_time-start_time}s")
181
+ logger.debug("Output Types: generated_image=%s", type(None))
182
+ logger.debug("Output Values: generated_image=None")
183
+ return out_image["images"][0]
184
+
185
+
186
+ except Exception as e:
187
+ # Handle exceptions and log error message
188
+ logger.error("Error occurred during inference: %s", str(e))
189
+ return str(e)
190
 
 
191
 
192
  def generate_image_from_parameters(prompt, guidance_scale, controlnet_scale, controlnet_end, upscaler_strength, seed, sampler_type, image):
193
  try: