Spaces:
Sleeping
Sleeping
new feature
Browse files
app.py
CHANGED
|
@@ -432,6 +432,215 @@ def apply_grabcut(image, margin_percent, iterations):
|
|
| 432 |
return labeled_img, foreground, info
|
| 433 |
|
| 434 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 435 |
def analyze_image_stats(image):
|
| 436 |
"""Provide detailed statistical analysis"""
|
| 437 |
if isinstance(image, Image.Image):
|
|
@@ -700,6 +909,84 @@ with gr.Blocks(title="Image Preprocessing Analyzer", theme=gr.themes.Soft()) as
|
|
| 700 |
inputs=[input_image, grabcut_margin, grabcut_iter],
|
| 701 |
outputs=[grabcut_output, grabcut_foreground, grabcut_info]
|
| 702 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 703 |
|
| 704 |
# Documentation
|
| 705 |
with gr.Accordion("📚 Filter Documentation", open=False):
|
|
@@ -759,6 +1046,30 @@ with gr.Blocks(title="Image Preprocessing Analyzer", theme=gr.themes.Soft()) as
|
|
| 759 |
- **Use Case**: Product photography, portrait background removal, object isolation
|
| 760 |
- **Algorithm**: Iteratively learns color distributions of foreground/background
|
| 761 |
- **Output**: Binary mask and extracted foreground object
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 762 |
""")
|
| 763 |
|
| 764 |
if __name__ == "__main__":
|
|
|
|
| 432 |
return labeled_img, foreground, info
|
| 433 |
|
| 434 |
|
| 435 |
+
def apply_fourier_filter(image, filter_type, cutoff_freq):
    """
    Apply Fourier Transform filtering in the frequency domain.

    The 2-D FFT decomposes the image into frequency components
    (low frequencies = smooth regions, high frequencies = edges/details);
    a circular mask centred on the zero-frequency bin then selects which
    components survive before the inverse transform.

    Args:
        image: PIL Image or numpy array (RGB or grayscale).
        filter_type: One of "Low-Pass (Blur)", "High-Pass (Sharpen)",
            "Band-Pass", "Band-Stop (Notch)".
        cutoff_freq: Filter radius in pixels, measured from the spectrum centre.

    Returns:
        Tuple of (filtered RGB image, magnitude-spectrum RGB image, markdown info).
    """
    if isinstance(image, Image.Image):
        image = np.array(image)

    # Fourier filtering here operates on a single channel.
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) if len(image.shape) == 3 else image

    # Forward transform, with the zero-frequency component shifted to the centre.
    spectrum = np.fft.fftshift(np.fft.fft2(gray))

    # Log-magnitude for visualization (+1 avoids log(0)).
    magnitude_spectrum = 20 * np.log(np.abs(spectrum) + 1)

    # Distance of every spectrum bin from the centre (cy, cx).
    rows, cols = gray.shape
    cy, cx = rows // 2, cols // 2
    yy, xx = np.ogrid[:rows, :cols]
    dist = np.sqrt((xx - cx) ** 2 + (yy - cy) ** 2)

    # Select which frequencies to keep, based on the requested filter.
    if filter_type == "Low-Pass (Blur)":
        keep = dist <= cutoff_freq
        desc = "Removes high frequencies (edges/details), keeps smooth regions"
    elif filter_type == "High-Pass (Sharpen)":
        keep = dist > cutoff_freq
        desc = "Removes low frequencies (smooth regions), keeps edges/details"
    elif filter_type == "Band-Pass":
        keep = (dist >= cutoff_freq / 2) & (dist <= cutoff_freq)
        desc = "Keeps only middle-range frequencies"
    elif filter_type == "Band-Stop (Notch)":
        keep = ~((dist >= cutoff_freq / 2) & (dist <= cutoff_freq))
        desc = "Removes middle-range frequencies, keeps very low and very high"
    else:
        keep = np.ones((rows, cols), dtype=bool)
        desc = "No filtering"

    mask = keep.astype(np.uint8)

    # Mask the spectrum, undo the shift, and transform back to the spatial domain.
    filtered_spectrum = spectrum * mask
    restored = np.real(np.fft.ifft2(np.fft.ifftshift(filtered_spectrum)))

    # Bring the result back into displayable 8-bit range.
    restored = np.clip(restored, 0, 255).astype(np.uint8)
    result_rgb = cv2.cvtColor(restored, cv2.COLOR_GRAY2RGB)

    # Spectrum visualization (clipped — the DC peak saturates to white).
    magnitude_vis = np.clip(magnitude_spectrum, 0, 255).astype(np.uint8)
    magnitude_rgb = cv2.cvtColor(magnitude_vis, cv2.COLOR_GRAY2RGB)

    info = f"""
    ### Fourier Transform Filtering Applied
    - **Filter Type**: {filter_type}
    - **Cutoff Frequency**: {cutoff_freq} pixels
    - **Effect**: {desc}
    - **How it works**: Transforms to frequency domain, filters, transforms back
    - **Use Case**: Periodic noise removal, sharpening, custom filtering
    """

    return result_rgb, magnitude_rgb, info
|
| 521 |
+
|
| 522 |
+
|
| 523 |
+
def apply_gray_world(image, percentile):
    """
    Apply the Gray-World color constancy algorithm.

    Assumes the average color of the scene should be neutral gray and
    rescales each RGB channel so its statistic matches that gray, which
    removes global color casts from the illuminant.

    Args:
        image: PIL Image or numpy array; must be 3-channel RGB.
        percentile: 50 uses the per-channel mean (classic Gray-World);
            any other value uses that percentile instead, which is more
            robust to bright highlights.

    Returns:
        Tuple of (color-corrected uint8 RGB image, markdown info). For a
        grayscale input, returns the image expanded to RGB plus an error string.
    """
    if isinstance(image, Image.Image):
        image = np.array(image)

    # Gray-World needs separate color channels to balance.
    if len(image.shape) != 3:
        return cv2.cvtColor(image, cv2.COLOR_GRAY2RGB), "Image must be in color for Gray-World"

    img_float = image.astype(np.float32)

    # Per-channel statistic: mean for the classic algorithm, percentile otherwise.
    if percentile == 50:
        channel_stats = [np.mean(img_float[:, :, c]) for c in range(3)]
        method = "Mean"
    else:
        channel_stats = [np.percentile(img_float[:, :, c], percentile) for c in range(3)]
        method = f"{percentile}th Percentile"
    avg_r, avg_g, avg_b = channel_stats

    # The target "gray" is the mean of the three channel statistics.
    gray_value = (avg_r + avg_g + avg_b) / 3

    # Per-channel gains; epsilon guards against division by zero.
    scales = [gray_value / (stat + 1e-6) for stat in channel_stats]
    scale_r, scale_g, scale_b = scales

    # Rescale each channel and clamp to the valid 8-bit range.
    result = img_float.copy()
    for channel, gain in enumerate(scales):
        result[:, :, channel] = np.clip(result[:, :, channel] * gain, 0, 255)

    result = result.astype(np.uint8)

    info = f"""
    ### Gray-World Color Constancy Applied
    - **Method**: {method}
    - **Scaling Factors**: R={scale_r:.3f}, G={scale_g:.3f}, B={scale_b:.3f}
    - **Effect**: Removes color cast by balancing channel averages
    - **Assumption**: Average scene color should be neutral gray
    - **Use Case**: Correct lighting color casts (blue/yellow/green tints)

    **Original Averages**: R={avg_r:.1f}, G={avg_g:.1f}, B={avg_b:.1f}
    **Target Gray**: {gray_value:.1f}
    """

    return result, info
|
| 582 |
+
|
| 583 |
+
|
| 584 |
+
def apply_anisotropic_diffusion(image, iterations, kappa, gamma):
    """
    Apply Anisotropic Diffusion (Perona-Malik).

    Edge-preserving smoothing: diffusion is strong in flat regions and
    suppressed across strong gradients, so noise is reduced while edges
    are maintained.

    Args:
        image: PIL Image or numpy array (RGB or grayscale).
        iterations: Number of diffusion steps (more = more smoothing).
            Accepts a float (e.g. from a UI slider) and truncates to int.
        kappa: Edge-stopping threshold; gradients well above kappa are
            treated as edges and diffuse little (10-50 typical).
        gamma: Integration step size; must be <= 0.25 for stability.

    Returns:
        Tuple of (smoothed RGB image, markdown info).
    """
    if isinstance(image, Image.Image):
        image = np.array(image)

    # Diffusion is performed on a single channel.
    if len(image.shape) == 3:
        gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    else:
        gray = image

    # Work in float to avoid uint8 wrap-around during the updates.
    img = gray.astype(np.float32)

    # Gradio sliders may deliver a float even with step=1; range() needs an int.
    for _ in range(int(iterations)):
        # Finite-difference gradients toward the 4 neighbours.
        # NOTE(review): np.roll wraps around, so border pixels diffuse with the
        # opposite edge — a minor artifact of this simple implementation.
        gradN = np.roll(img, 1, axis=0) - img   # North
        gradS = np.roll(img, -1, axis=0) - img  # South
        gradE = np.roll(img, -1, axis=1) - img  # East
        gradW = np.roll(img, 1, axis=1) - img   # West

        # Exponential edge-stopping function: conductance ~1 in flat regions,
        # ~0 across gradients much larger than kappa (preserves wide regions).
        cN = np.exp(-(gradN / kappa) ** 2)
        cS = np.exp(-(gradS / kappa) ** 2)
        cE = np.exp(-(gradE / kappa) ** 2)
        cW = np.exp(-(gradW / kappa) ** 2)

        # Explicit Euler update of the diffusion PDE.
        img = img + gamma * (cN * gradN + cS * gradS + cE * gradE + cW * gradW)

    # Clip back to 8-bit range and expand to RGB for display.
    result = np.clip(img, 0, 255).astype(np.uint8)
    result_rgb = cv2.cvtColor(result, cv2.COLOR_GRAY2RGB)

    info = f"""
    ### Anisotropic Diffusion Applied
    - **Iterations**: {iterations}
    - **Kappa (Edge threshold)**: {kappa}
    - **Gamma (Step size)**: {gamma}
    - **Algorithm**: Perona-Malik diffusion
    - **Effect**: Smooths noise while preserving edges
    - **How it works**: Diffusion is adaptive - strong in flat regions, weak at edges
    - **Use Case**: Medical imaging, noise reduction with edge preservation

    **Parameters Guide**:
    - Kappa: Controls what's considered an edge (10-50 typical)
    - Gamma: Controls diffusion speed (0.1-0.25 typical, must be ≤0.25 for stability)
    - Iterations: More = more smoothing (5-20 typical)
    """

    return result_rgb, info
|
| 642 |
+
|
| 643 |
+
|
| 644 |
def analyze_image_stats(image):
|
| 645 |
"""Provide detailed statistical analysis"""
|
| 646 |
if isinstance(image, Image.Image):
|
|
|
|
| 909 |
inputs=[input_image, grabcut_margin, grabcut_iter],
|
| 910 |
outputs=[grabcut_output, grabcut_foreground, grabcut_info]
|
| 911 |
)
|
| 912 |
+
|
| 913 |
+
# Fourier Transform Filtering Tab
# Wires apply_fourier_filter to the shared input_image (defined earlier in the UI).
with gr.TabItem("🌊 Fourier Transform Filtering"):
    gr.Markdown("""
    **Fourier Transform** decomposes images into frequency components.
    Filter in frequency domain to remove periodic noise or enhance specific features.
    """)

    with gr.Row():
        # Filter type and cutoff radius; choices must match the string
        # comparisons inside apply_fourier_filter exactly.
        fourier_type = gr.Radio(
            ["Low-Pass (Blur)", "High-Pass (Sharpen)", "Band-Pass", "Band-Stop (Notch)"],
            value="Low-Pass (Blur)",
            label="Filter Type"
        )
        fourier_cutoff = gr.Slider(10, 200, value=30, step=5, label="Cutoff Frequency (pixels)")

    fourier_btn = gr.Button("Apply Fourier Filter", variant="primary")

    with gr.Row():
        # Filtered result alongside the log-magnitude spectrum visualization.
        fourier_output = gr.Image(label="Filtered Result")
        fourier_spectrum = gr.Image(label="Frequency Spectrum")

    fourier_info = gr.Markdown()

    # Outputs map 1:1 to apply_fourier_filter's (result, spectrum, info) return.
    fourier_btn.click(
        fn=apply_fourier_filter,
        inputs=[input_image, fourier_type, fourier_cutoff],
        outputs=[fourier_output, fourier_spectrum, fourier_info]
    )

# Gray-World Color Constancy Tab
# Wires apply_gray_world to the shared input_image.
with gr.TabItem("🎨 Color Constancy (Gray-World)"):
    gr.Markdown("""
    **Gray-World Algorithm** corrects color casts caused by lighting.
    Assumes the average color of a scene should be neutral gray.
    """)

    with gr.Row():
        # 50 selects the mean path in apply_gray_world; other values use
        # the percentile path (more robust to highlights).
        grayworld_percentile = gr.Slider(
            40, 60, value=50, step=5,
            label="Percentile (50=Mean, 40-45=Robust to highlights)"
        )

    grayworld_btn = gr.Button("Apply Gray-World", variant="primary")

    with gr.Row():
        grayworld_output = gr.Image(label="Color Corrected")
        grayworld_info = gr.Markdown()

    # Outputs map to apply_gray_world's (result, info) return.
    grayworld_btn.click(
        fn=apply_gray_world,
        inputs=[input_image, grayworld_percentile],
        outputs=[grayworld_output, grayworld_info]
    )

# Anisotropic Diffusion Tab
# Wires apply_anisotropic_diffusion to the shared input_image.
with gr.TabItem("🔬 Anisotropic Diffusion"):
    gr.Markdown("""
    **Anisotropic Diffusion** (Perona-Malik) performs edge-preserving smoothing.
    Reduces noise while maintaining sharp edges - ideal for medical imaging.
    """)

    with gr.Row():
        # Slider ranges mirror the "Parameters Guide" in the function's info
        # text; gamma is capped at 0.25 for numerical stability.
        aniso_iter = gr.Slider(1, 30, value=10, step=1, label="Iterations")
        aniso_kappa = gr.Slider(5, 100, value=20, step=5, label="Kappa (Edge threshold)")
        aniso_gamma = gr.Slider(0.05, 0.25, value=0.15, step=0.05, label="Gamma (Step size)")

    aniso_btn = gr.Button("Apply Anisotropic Diffusion", variant="primary")

    with gr.Row():
        aniso_output = gr.Image(label="Smoothed Result")
        aniso_info = gr.Markdown()

    # Outputs map to apply_anisotropic_diffusion's (result, info) return.
    aniso_btn.click(
        fn=apply_anisotropic_diffusion,
        inputs=[input_image, aniso_iter, aniso_kappa, aniso_gamma],
        outputs=[aniso_output, aniso_info]
    )
|
| 990 |
|
| 991 |
# Documentation
|
| 992 |
with gr.Accordion("📚 Filter Documentation", open=False):
|
|
|
|
| 1046 |
- **Use Case**: Product photography, portrait background removal, object isolation
|
| 1047 |
- **Algorithm**: Iteratively learns color distributions of foreground/background
|
| 1048 |
- **Output**: Binary mask and extracted foreground object
|
| 1049 |
+
|
| 1050 |
+
#### Fourier Transform Filtering
|
| 1051 |
+
- **Purpose**: Filter images in frequency domain
|
| 1052 |
+
- **How it works**: Converts to frequency domain, applies filter, converts back
|
| 1053 |
+
- **Low-Pass**: Removes high frequencies (edges), keeps smooth regions → blur effect
|
| 1054 |
+
- **High-Pass**: Removes low frequencies (smooth regions), keeps edges → sharpen effect
|
| 1055 |
+
- **Band-Pass**: Keeps only middle-range frequencies
|
| 1056 |
+
- **Band-Stop**: Removes middle-range frequencies (notch filter)
|
| 1057 |
+
- **Use Case**: Periodic noise removal, custom filtering, pattern analysis
|
| 1058 |
+
|
| 1059 |
+
#### Gray-World Color Constancy
|
| 1060 |
+
- **Purpose**: Correct color casts from lighting
|
| 1061 |
+
- **How it works**: Assumes average scene color should be neutral gray
|
| 1062 |
+
- **Method**: Balances RGB channels so their average equals gray
|
| 1063 |
+
- **Percentile**: 50=standard mean, 40-45=robust to bright highlights
|
| 1064 |
+
- **Use Case**: Indoor/outdoor lighting correction, white balance adjustment
|
| 1065 |
+
|
| 1066 |
+
#### Anisotropic Diffusion
|
| 1067 |
+
- **Purpose**: Edge-preserving noise reduction
|
| 1068 |
+
- **How it works**: Perona-Malik diffusion - smooths flat regions, preserves edges
|
| 1069 |
+
- **Kappa**: Edge threshold (10-50 typical, higher = more edges preserved)
|
| 1070 |
+
- **Gamma**: Diffusion speed (0.1-0.25, must be ≤0.25 for stability)
|
| 1071 |
+
- **Iterations**: More = more smoothing (5-20 typical)
|
| 1072 |
+
- **Use Case**: Medical imaging, noise reduction without edge loss
|
| 1073 |
""")
|
| 1074 |
|
| 1075 |
if __name__ == "__main__":
|