import cv2
import numpy as np

def _hist_correlation(a, b):
    """Correlation (HISTCMP_CORREL) between the 256-bin histograms of two single-channel images."""
    hist_a = cv2.calcHist([a], [0], None, [256], [0, 256])
    hist_b = cv2.calcHist([b], [0], None, [256], [0, 256])
    return cv2.compareHist(hist_a, hist_b, cv2.HISTCMP_CORREL)


def calculate_similarity(img1, img2):
    """Compute a similarity score between two document images.

    Blends four cues with document-oriented weights: edge histograms,
    global intensity histograms, SIFT keypoint matches (Lowe's ratio test)
    and adaptive-threshold text masks.

    Args:
        img1: first image, 3-channel BGR numpy array.
        img2: second image, 3-channel BGR numpy array.

    Returns:
        float: weighted similarity score. The SIFT term is in [0, 1];
        histogram-correlation terms are in [-1, 1] (typically near [0, 1]
        for document images).
    """
    # Resize so the longest side is at most 800 px, preserving aspect
    # ratio. Never upscale (scale capped at 1.0): interpolation adds no
    # information and only blurs small inputs.
    max_size = 800
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]

    scale1 = min(1.0, max_size / max(h1, w1))
    scale2 = min(1.0, max_size / max(h2, w2))

    img1 = cv2.resize(img1, (int(w1 * scale1), int(h1 * scale1)))
    img2 = cv2.resize(img2, (int(w2 * scale2), int(h2 * scale2)))

    # Grayscale + histogram equalization to reduce lighting differences
    # between the two scans/photos.
    gray1 = cv2.equalizeHist(cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY))
    gray2 = cv2.equalizeHist(cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY))

    # 1. Edge similarity (document edge features).
    edges1 = cv2.Canny(gray1, 100, 200)
    edges2 = cv2.Canny(gray2, 100, 200)
    score_edges = _hist_correlation(edges1, edges2)

    # 2. Global intensity-histogram similarity (overall structure).
    #    NOTE: this is histogram correlation, not true SSIM.
    score_structure = _hist_correlation(gray1, gray2)

    # 3. SIFT feature matching (local features).
    sift = cv2.SIFT_create()
    kp1, des1 = sift.detectAndCompute(gray1, None)
    kp2, des2 = sift.detectAndCompute(gray2, None)

    score_sift = 0
    if des1 is not None and des2 is not None and len(kp1) > 10 and len(kp2) > 10:
        FLANN_INDEX_KDTREE = 1
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)
        flann = cv2.FlannBasedMatcher(index_params, search_params)

        good_matches = []
        for pair in flann.knnMatch(des1, des2, k=2):
            # knnMatch may return fewer than k neighbours for a descriptor;
            # guard the pair length before Lowe's ratio test, otherwise the
            # 2-tuple unpacking raises ValueError.
            if len(pair) == 2 and pair[0].distance < 0.7 * pair[1].distance:
                good_matches.append(pair[0])

        score_sift = len(good_matches) / min(len(kp1), len(kp2))

    # 4. Text-region similarity: adaptive thresholding highlights text areas.
    text_mask1 = cv2.adaptiveThreshold(gray1, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
    text_mask2 = cv2.adaptiveThreshold(gray2, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
    score_text = _hist_correlation(text_mask1, text_mask2)

    # Weights tuned to emphasize document-specific features.
    weights = {
        'edges': 0.3,      # edge features
        'structure': 0.2,  # global structure
        'sift': 0.2,       # local features
        'text': 0.3,       # text regions
    }

    final_score = (
        weights['edges'] * score_edges +
        weights['structure'] * score_structure +
        weights['sift'] * score_sift +
        weights['text'] * score_text
    )

    return final_score

def evaluate_image_quality(image):
    """Assess image sharpness/quality from several low-level cues.

    Args:
        image: BGR (3-channel) or grayscale numpy array (uint8).

    Returns:
        tuple: (final_score, normalized_scores) — final_score is a float
        in [0, 100], normalized_scores maps each metric name to its
        0-100 normalized value.
    """
    # Work on a grayscale copy.
    if len(image.shape) == 3:
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    else:
        gray = image.copy()

    scores = {}

    # 1. Variance of the Laplacian (edge sharpness).
    laplacian = cv2.Laplacian(gray, cv2.CV_64F)
    scores['laplacian'] = laplacian.var()

    # 2. Mean gradient magnitude (local contrast).
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=5)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=5)
    gradient_magnitude = np.sqrt(sobelx**2 + sobely**2)
    scores['gradient'] = np.mean(gradient_magnitude)

    # 3. Local variance (texture detail): E[x^2] - E[x]^2 over a 5x5 window.
    #    Clamp at zero — floating-point rounding can yield tiny negatives.
    gray_f = gray.astype(float)
    local_var = cv2.blur(gray_f**2, (5, 5)) - cv2.blur(gray_f, (5, 5))**2
    scores['local_variance'] = np.mean(np.maximum(local_var, 0))

    # 4. Brightness-normalized contrast.
    mean_intensity = np.mean(gray)
    std_intensity = np.std(gray)
    scores['contrast'] = std_intensity / (mean_intensity + 1e-6)  # avoid div-by-zero

    # 5. Noise estimate: mean absolute deviation from a Gaussian blur.
    #    cv2.absdiff is used because a plain uint8 subtraction
    #    (gray - blurred) wraps modulo 256 for negative differences,
    #    grossly inflating the estimate.
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    scores['noise'] = float(np.mean(cv2.absdiff(gray, blurred)))

    # Reference maxima used to map each raw metric onto a 0-100 scale.
    max_scores = {
        'laplacian': 2000,
        'gradient': 100,
        'local_variance': 1000,
        'contrast': 0.5,
        'noise': 30,
    }

    normalized_scores = {
        k: min(100, max(0, v * 100 / max_scores[k]))
        for k, v in scores.items()
    }

    # Relative importance of each cue in the combined score.
    weights = {
        'laplacian': 0.25,      # edge sharpness
        'gradient': 0.25,       # local contrast
        'local_variance': 0.2,  # texture detail
        'contrast': 0.2,        # global contrast
        'noise': 0.1,           # noise level
    }

    final_score = sum(normalized_scores[k] * weights[k] for k in normalized_scores)

    # Penalize images whose normalized noise exceeds 50.
    noise_penalty = max(0, normalized_scores['noise'] - 50) * 0.2
    final_score = max(0, final_score - noise_penalty)

    return final_score, normalized_scores

def detect_skin(image, threshold=0.3):
    """Check whether a significant fraction of the image is skin-colored.

    Args:
        image: BGR (3-channel) numpy array.
        threshold: minimum skin-pixel fraction to report a positive result.

    Returns:
        tuple: (exceeds_threshold, skin_ratio).
    """
    # YCrCb separates luma from chroma, so skin tones cluster tightly
    # in the Cr/Cb plane regardless of brightness.
    converted = cv2.cvtColor(image, cv2.COLOR_BGR2YCrCb)

    # Empirical Cr/Cb bounds for human skin; Y (brightness) is unconstrained.
    lower = np.array([0, 133, 77], np.uint8)
    upper = np.array([255, 173, 127], np.uint8)

    mask = cv2.inRange(converted, lower, upper)

    # Fraction of pixels flagged as skin.
    total_pixels = mask.shape[0] * mask.shape[1]
    skin_ratio = np.count_nonzero(mask) / total_pixels

    return skin_ratio > threshold, skin_ratio