import cv2
import numpy as np
import os
import tempfile
from sam.sam_client import predict

def sam_model_api(image, points=None, boxes=None):
    """Real SAM model API for segmentation.

    Parameters
    ----------
    image : np.ndarray
        RGB image, shape (H, W, 3).
    points : sequence of (x, y) pairs, optional
        Foreground click prompts for SAM.
    boxes : sequence of 4-element boxes, optional
        Bounding-box prompts; only the first box is used.

    Returns
    -------
    np.ndarray
        uint8 mask of shape (H, W) with values 0/255; all-zero when
        no prompt is given.

    Raises
    ------
    IOError
        If the temporary image file cannot be written.
    """
    # Create the temp path and close the OS handle immediately: on Windows,
    # cv2.imwrite cannot open a file while another handle still holds it
    # (the previous NamedTemporaryFile approach wrote while the handle was open).
    fd, tmp_path = tempfile.mkstemp(suffix='.jpg')
    os.close(fd)
    if not cv2.imwrite(tmp_path, cv2.cvtColor(image, cv2.COLOR_RGB2BGR)):
        os.unlink(tmp_path)
        raise IOError(f"Failed to write temporary image: {tmp_path}")

    try:
        # Explicit None/length checks instead of bare truthiness, so numpy
        # arrays (whose truth value is ambiguous) and empty sequences are
        # both handled correctly.
        if points is not None and len(points) > 0:
            # Convert points format for SAM API
            point_coords = [list(p) for p in points]
            point_labels = [1] * len(points)  # All foreground points
            masks, scores, _ = predict(
                tmp_path,
                point_coords=point_coords,
                point_labels=point_labels,
                multimask_output=False
            )
            mask = masks[0].astype(np.uint8) * 255

        elif boxes is not None and len(boxes) > 0:
            # SAM takes a single box prompt; only the first box is used.
            box = list(boxes[0][:4])
            masks, _, _ = predict(
                tmp_path,
                box=box,
                multimask_output=False
            )
            mask = masks[0].astype(np.uint8) * 255
        else:
            # No prompt supplied: return an all-background mask.
            mask = np.zeros(image.shape[:2], dtype=np.uint8)

        return mask

    finally:
        # Clean up temp file
        if os.path.exists(tmp_path):
            os.unlink(tmp_path)

def inpainting_model_api(original_img, mask, new_clothing_img):
    """Improved inpainting model API for virtual try-on.

    Composites ``new_clothing_img`` onto ``original_img`` inside the
    masked region, feathering the mask edge for a smooth transition,
    then applies a detail-enhancement pass.

    Returns a single-element list containing the composited uint8 image.
    """
    # Match the clothing image to the mask's spatial dimensions.
    mask_h, mask_w = mask.shape[:2]
    clothing_resized = cv2.resize(new_clothing_img, (mask_w, mask_h))

    # Expand the single-channel mask to three channels so it can weight
    # each color channel of the images.
    mask_rgb = cv2.merge([mask, mask, mask])

    # Scale the 0-255 mask down to per-pixel blend weights in [0, 1].
    weights = mask_rgb.astype(float) / 255.0

    # Feather the mask boundary so the composite transitions smoothly.
    weights = cv2.GaussianBlur(weights, (21, 21), 0)

    # Weighted composite: clothing where the mask is on, original elsewhere.
    blended = original_img * (1 - weights) + clothing_resized * weights
    blended = blended.astype(np.uint8)

    # Detail-enhancement pass so the clothing looks more natural.
    blended = cv2.detailEnhance(blended, sigma_s=5, sigma_r=0.1)

    return [blended]  # Return single result instead of multiple

def enhancement_model_api(image, method="detail", sigma_s=10, sigma_r=0.15, contrast=1.2):
    """Advanced image enhancement API.

    Dispatches on ``method``:
      - "detail":   cv2.detailEnhance with the given sigmas
      - "color":    CLAHE applied to the L channel in LAB space
      - "contrast": linear contrast scaling by ``contrast``
      - anything else: returns an unmodified copy of ``image``
    """
    if method == "detail":
        return cv2.detailEnhance(image, sigma_s=sigma_s, sigma_r=sigma_r)

    if method == "color":
        # Equalize only the lightness channel so hue/chroma are preserved.
        lab = cv2.cvtColor(image, cv2.COLOR_RGB2LAB)
        lightness, chan_a, chan_b = cv2.split(lab)
        clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))
        equalized = cv2.merge((clahe.apply(lightness), chan_a, chan_b))
        return cv2.cvtColor(equalized, cv2.COLOR_LAB2RGB)

    if method == "contrast":
        return cv2.convertScaleAbs(image, alpha=contrast, beta=0)

    # Unknown method: hand back a copy so callers never alias the input.
    return image.copy()
