kabancov_et committed
Commit 5383c97 · Parent(s): bb1527b

Add detailed timing logs for performance monitoring: track time for each step in /detect and /analyze endpoints
Files changed: clothing_detector.py (+70 -10)
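The diff below repeats one pattern for every step: record a start time, run the step, compute the elapsed time, and log it. As a minimal sketch, the same instrumentation could be factored into a reusable context manager; `log_step` is a hypothetical helper for illustration, not part of the committed code:

import logging
import time
from contextlib import contextmanager

logger = logging.getLogger(__name__)

@contextmanager
def log_step(name: str):
    """Log how long the wrapped block took, mirroring the commit's per-step logs."""
    start = time.time()
    try:
        yield
    finally:
        elapsed = time.time() - start
        logger.info(f"⏱️ {name} completed in {elapsed:.2f}s")

# Self-contained demo:
with log_step("Sleep demo"):
    time.sleep(0.1)

# Hypothetical usage mirroring the "Run inference" step from the diff:
# with log_step("Model inference"):
#     with torch.no_grad():
#         outputs = self.model(**inputs)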
clothing_detector.py CHANGED

@@ -193,7 +193,7 @@ class ClothingDetector:
         global _cache_hits
         _cache_hits += 1
         _update_cache_access(image_hash)  # Update access statistics
-        logger.info("Using cached high-quality segmentation result")
+        logger.info("⏱️ Using cached high-quality segmentation result")
         return _segmentation_cache[image_hash]
 
         global _cache_misses
@@ -201,22 +201,36 @@ class ClothingDetector:
         # Run segmentation
         logger.info("Performing new high-quality segmentation")
 
+        seg_start = time.time()
         try:
             # Load and preprocess image
+            preprocess_start = time.time()
             image = Image.open(BytesIO(image_bytes))
             image = image.convert('RGB')
+            preprocess_time = time.time() - preprocess_start
+            logger.info(f"⏱️ Image preprocessing completed in {preprocess_time:.2f}s")
 
             # Prepare inputs for the model
+            inputs_start = time.time()
             inputs = self.processor(images=image, return_tensors="pt")
+            inputs_time = time.time() - inputs_start
+            logger.info(f"⏱️ Input preparation completed in {inputs_time:.2f}s")
 
             # Move inputs to device
+            device_start = time.time()
             inputs = {k: v.to(self.device) for k, v in inputs.items()}
+            device_time = time.time() - device_start
+            logger.info(f"⏱️ Device transfer completed in {device_time:.2f}s")
 
             # Run inference
+            inference_start = time.time()
             with torch.no_grad():
                 outputs = self.model(**inputs)
+            inference_time = time.time() - inference_start
+            logger.info(f"⏱️ Model inference completed in {inference_time:.2f}s")
 
             # Get predictions
+            postprocess_start = time.time()
             logits = outputs.logits
             pred_seg = torch.argmax(logits, dim=1).squeeze().cpu().numpy()
 
@@ -251,7 +265,11 @@ class ClothingDetector:
 
             logger.info(f"Created high-quality segmentation: {pred_seg_high_quality.shape} for image size {image.size}")
 
+            postprocess_time = time.time() - postprocess_start
+            logger.info(f"⏱️ Postprocessing completed in {postprocess_time:.2f}s")
+
             # Store result in cache
+            cache_start = time.time()
             _segmentation_cache[image_hash] = {
                 'pred_seg': pred_seg_high_quality,  # Use high-quality version
                 'image': image
@@ -260,6 +278,12 @@ class ClothingDetector:
             # Update cache access and cleanup if needed
             _update_cache_access(image_hash)
             _cleanup_cache()
+            cache_time = time.time() - cache_start
+            logger.info(f"⏱️ Cache operations completed in {cache_time:.2f}s")
+
+            # Total segmentation time
+            total_seg_time = time.time() - seg_start
+            logger.info(f"⏱️ TOTAL segmentation completed in {total_seg_time:.2f}s (preprocess: {preprocess_time:.2f}s, inputs: {inputs_time:.2f}s, device: {device_time:.2f}s, inference: {inference_time:.2f}s, postprocess: {postprocess_time:.2f}s, cache: {cache_time:.2f}s)")
 
             return {
                 'pred_seg': pred_seg_high_quality,  # Return high-quality version
@@ -267,7 +291,8 @@ class ClothingDetector:
             }
 
         except Exception as e:
-
+            total_seg_time = time.time() - seg_start
+            logger.error(f"❌ Error in segmentation after {total_seg_time:.2f}s: {e}")
             raise
 
     def detect_clothing(self, image_bytes: bytes) -> dict:
@@ -571,14 +596,24 @@ class ClothingDetector:
         Optimized version that returns only segmentation data without creating highlight images.
         Much faster - client handles visualization.
         """
+        start_time = time.time()
         try:
+            # Step 1: Segmentation
+            seg_start = time.time()
             seg_result = self._segment_image(image_bytes)
             pred_seg = seg_result['pred_seg']
             image = seg_result['image']
+            seg_time = time.time() - seg_start
+            logger.info(f"⏱️ Segmentation completed in {seg_time:.2f}s")
 
+            # Step 2: Clothing detection
+            detect_start = time.time()
             clothing_result = self.detect_clothing(image_bytes)
+            detect_time = time.time() - detect_start
+            logger.info(f"⏱️ Clothing detection completed in {detect_time:.2f}s")
 
-            #
+            # Step 3: Create masks
+            masks_start = time.time()
             clothing_types = clothing_result.get('clothing_instances', [])
             masks = {}
 
@@ -601,13 +636,23 @@ class ClothingDetector:
             masks['all'] = self._mask_to_base64(all_clothing_mask)
             logger.info("All masks created successfully")
 
-
+            masks_time = time.time() - masks_start
+            logger.info(f"⏱️ Masks creation completed in {masks_time:.2f}s")
+
+            # Step 4: Cache storage
+            cache_start = time.time()
             image_hash = self._get_image_hash(image_bytes)
             _segmentation_data_cache.set(image_hash, {
                 "pred_seg": pred_seg,
                 "image_size": list(image.size),
                 "original_image_bytes": image_bytes  # Store original image for background removal
             })
+            cache_time = time.time() - cache_start
+            logger.info(f"⏱️ Cache storage completed in {cache_time:.2f}s")
+
+            # Total time
+            total_time = time.time() - start_time
+            logger.info(f"🚀 TOTAL /detect completed in {total_time:.2f}s (seg: {seg_time:.2f}s, detect: {detect_time:.2f}s, masks: {masks_time:.2f}s, cache: {cache_time:.2f}s)")
 
             return {
                 **clothing_result,
@@ -619,7 +664,8 @@ class ClothingDetector:
             }
         }
         except Exception as e:
-
+            total_time = time.time() - start_time
+            logger.error(f"❌ Error in optimized clothing detection after {total_time:.2f}s: {e}")
             raise
 
     def _get_clothing_mask(self, pred_seg: np.ndarray, clothing_type: str) -> np.ndarray:
@@ -679,8 +725,10 @@ class ClothingDetector:
         Analyze image using pre-computed segmentation data from server cache.
         Much faster than full analysis.
         """
+        start_time = time.time()
         try:
-            # Get
+            # Step 1: Get data from cache
+            cache_start = time.time()
             image_hash = segmentation_data.get("image_hash")
             if not image_hash:
                 raise ValueError("No image_hash provided in segmentation_data")
@@ -694,16 +742,27 @@ class ClothingDetector:
             image_size = cached_data["image_size"]
             original_image_bytes = cached_data["original_image_bytes"]
 
-
+            cache_time = time.time() - cache_start
+            logger.info(f"⏱️ Cache retrieval completed in {cache_time:.2f}s for hash: {image_hash[:8]}...")
 
-            # Create
+            # Step 2: Create clothing-only image
+            image_start = time.time()
             clothing_only_image = self._create_real_clothing_only_image(
                 original_image_bytes, pred_seg, selected_clothing
             )
+            image_time = time.time() - image_start
+            logger.info(f"⏱️ Clothing-only image creation completed in {image_time:.2f}s")
 
-            #
+            # Step 3: Analyze dominant color
+            color_start = time.time()
             from process import get_dominant_color_from_base64
             color = get_dominant_color_from_base64(clothing_only_image)
+            color_time = time.time() - color_start
+            logger.info(f"⏱️ Dominant color analysis completed in {color_time:.2f}s")
+
+            # Total time
+            total_time = time.time() - start_time
+            logger.info(f"🚀 TOTAL /analyze completed in {total_time:.2f}s (cache: {cache_time:.2f}s, image: {image_time:.2f}s, color: {color_time:.2f}s)")
 
             return {
                 "dominant_color": color,
@@ -713,7 +772,8 @@ class ClothingDetector:
             }
 
         except Exception as e:
-
+            total_time = time.time() - start_time
+            logger.error(f"❌ Error in analysis from segmentation after {total_time:.2f}s: {e}")
             raise
 
     def _create_segmentation_visualization(self, pred_seg: np.ndarray, image_size: tuple, selected_clothing: str = None) -> str:
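One design note on the clock used above: time.time() reads the wall clock, which can jump when the system clock is adjusted (e.g. by NTP), so a long-running service can occasionally log negative or inflated durations. For measuring intervals, Python also provides the monotonic time.perf_counter(); a minimal sketch of the same pattern with it:

import time

start = time.perf_counter()  # monotonic; unaffected by system clock changes
time.sleep(0.05)             # stand-in for a step under measurement
elapsed = time.perf_counter() - start
print(f"⏱️ step completed in {elapsed:.2f}s")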