import os
from typing import Dict, List, Optional, Tuple

import cv2
import dlib
import numpy as np
import torch
import torchvision.models as models
import torchvision.transforms as transforms
from PIL import Image
from sklearn.cluster import AgglomerativeClustering
from torchvision.models import ResNet50_Weights

from app.models.common import FilterReason
from app.models.image_processor import ImageProcessor
from app.models.process_config import ProcessConfig

class QualityChecker:
    """Photo quality gate and near-duplicate helper.

    Filters portrait photos by resolution, face presence/size/position,
    brightness, contrast and sharpness; normalizes lighting via CLAHE;
    and extracts ResNet-50 features for agglomerative clustering of
    near-duplicate images.
    """

    def __init__(self):
        """Initialize the feature extractor, preprocessing pipeline,
        face detector and detection thresholds."""
        # Load ResNet-50 via the modern torchvision weights API.
        self.model = models.resnet50(weights=ResNet50_Weights.DEFAULT)
        self.model.eval()

        if torch.cuda.is_available():
            self.model = self.model.cuda()

        # Drop the final fully-connected layer; keep the 2048-d pooled features.
        self.model = torch.nn.Sequential(*list(self.model.children())[:-1])

        # Standard ImageNet preprocessing.
        self.transform = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                              std=[0.229, 0.224, 0.225])
        ])

        # HOG-based frontal face detector.
        self.face_detector = dlib.get_frontal_face_detector()

        # Detection thresholds. Face ratios are face-area / image-area;
        # brightness/contrast thresholds are in 8-bit gray levels.
        self.face_detection_params = {
            'min_face_ratio': 0.01,    # smallest acceptable face ratio
            'max_face_ratio': 0.5,     # largest acceptable face ratio
            'center_tolerance': 0.3,   # allowed offset of face center from image center
            'quality_params': {
                'sharpness': 80,       # variance-of-Laplacian threshold
                'brightness_min': 30,  # minimum mean gray level
                'brightness_max': 220, # maximum mean gray level
                'contrast': 20         # minimum gray-level std-dev
            }
        }

    def normalize_lighting(self, img_path):
        """Normalize image lighting with CLAHE on the LAB L-channel.

        Args:
            img_path: path to the input image (non-ASCII paths supported
                via ImageProcessor).

        Returns:
            The normalized BGR image as a numpy array, or None on any failure.
        """
        try:
            print(f"开始处理图片光照: {img_path}")

            # Read the image (handles non-ASCII paths).
            img = ImageProcessor.imread_with_chinese_path(img_path)
            if img is None:
                print(f"无法读取图片: {img_path}")
                return None

            print(f"图片尺寸: {img.shape}")

            # Convert to LAB so luminance can be equalized independently of color.
            try:
                lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
                print("成功转换到LAB颜色空间")
            except Exception as e:
                print(f"转换到LAB颜色空间失败: {e}")
                return None

            # Split channels.
            try:
                l, a, b = cv2.split(lab)
                print("成功分离LAB通道")
            except Exception as e:
                print(f"分离LAB通道失败: {e}")
                return None

            # Apply CLAHE to the L (lightness) channel only.
            try:
                clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))
                l_norm = clahe.apply(l)
                print("成功应用CLAHE")
            except Exception as e:
                print(f"应用CLAHE失败: {e}")
                return None

            # Re-merge the channels.
            try:
                lab_norm = cv2.merge((l_norm, a, b))
                print("成功合并通道")
            except Exception as e:
                print(f"合并通道失败: {e}")
                return None

            # Convert back to BGR.
            try:
                normalized = cv2.cvtColor(lab_norm, cv2.COLOR_LAB2BGR)
                print("成功转换回BGR")
            except Exception as e:
                print(f"转换回BGR失败: {e}")
                return None

            # Validate the result.
            if normalized is None or normalized.size == 0:
                print("处理结果无效")
                return None

            print(f"光照处理完成，输出图片尺寸: {normalized.shape}")
            return normalized

        except Exception as e:
            print(f"光照处理过程中发生错误: {e}")
            return None

    def check_exposure(self, img_path, valid_range=(0.05, 0.95)):
        """Check whether an image is reasonably exposed.

        Uses the normalized cumulative histogram: enough mass must lie
        above gray level 25 (not all-dark) and not all mass below level
        230 (not all-bright).

        Args:
            img_path: path to the image.
            valid_range: (dark_floor, bright_ceiling) CDF thresholds.

        Returns:
            True when exposure is acceptable; False otherwise (including
            when the image cannot be read).
        """
        img = ImageProcessor.imread_with_chinese_path(img_path, cv2.IMREAD_GRAYSCALE)
        # Fix: the original crashed inside cv2.calcHist on unreadable images.
        if img is None:
            return False
        hist = cv2.calcHist([img], [0], None, [256], [0, 256])
        cdf = hist.cumsum()
        cdf_normalized = cdf / cdf.max()
        return (cdf_normalized[25] > valid_range[0]) and \
               (cdf_normalized[230] < valid_range[1])

    def _extract_feature(self, img):
        """Run one RGB PIL image through the backbone; return a 2048-d vector."""
        tensor = self.transform(img).unsqueeze(0)
        if torch.cuda.is_available():
            tensor = tensor.cuda()
        with torch.no_grad():
            feature = self.model(tensor)
        return feature.squeeze().cpu().numpy()

    def get_clip_feature(self, img_path):
        """Extract a ResNet-50 feature vector for the image at img_path."""
        img = Image.open(img_path).convert('RGB')
        return self._extract_feature(img)

    def cluster_images(self, image_paths, threshold=0.2):
        """Cluster images by cosine distance between ResNet-50 features.

        Args:
            image_paths: list of image file paths.
            threshold: cosine distance threshold for merging clusters.

        Returns:
            Cluster labels (one per path); an empty list for empty input;
            all-zero labels when clustering fails.
        """
        if not image_paths:
            return []

        features = []
        for path in image_paths:
            try:
                img = Image.open(path).convert('RGB')
                features.append(self._extract_feature(img))
            except Exception as e:
                print(f"提取特征失败: {path}, 错误: {e}")
                # ResNet-50 feature width; NOTE(review): a zero vector has
                # undefined cosine distance — clustering then falls back to
                # the zero-label path below.
                features.append(np.zeros(2048))

        features = np.vstack(features)

        # Hierarchical clustering with average linkage on cosine distance.
        clustering = AgglomerativeClustering(
            n_clusters=None,
            distance_threshold=threshold,
            metric='cosine',
            linkage='average'
        )

        try:
            return clustering.fit_predict(features)
        except Exception as e:
            print(f"聚类失败: {e}")
            return np.zeros(len(image_paths))

    def crop_around_face(self, img: np.ndarray, face_rect, target_size: tuple = (2048, 2048)) -> Optional[np.ndarray]:
        """Crop the image around the detected face.

        Tries crop sizes of 3x, 4x and 5x the face size, accepting the
        first crop that is at least target_size and gives a face-area
        ratio between 10% and 30%.

        Args:
            img: original BGR image.
            face_rect: dlib detection rectangle (left/top/right/bottom methods).
            target_size: minimum (width, height) of the crop.

        Returns:
            The cropped image, or None when no crop satisfies the constraints.
        """
        try:
            height, width = img.shape[:2]
            face_width = face_rect.right() - face_rect.left()
            face_height = face_rect.bottom() - face_rect.top()
            face_center_x = (face_rect.left() + face_rect.right()) // 2
            face_center_y = (face_rect.top() + face_rect.bottom()) // 2

            # Try successively wider crops; stop at the first that keeps the
            # face ratio in [0.1, 0.3] while meeting the resolution floor.
            for ratio in [3, 4, 5]:
                crop_size = max(face_width * ratio, face_height * ratio)

                # Crop must not be smaller than the target resolution.
                if crop_size < min(target_size):
                    continue

                # Center the crop on the face.
                crop_left = max(0, face_center_x - crop_size // 2)
                crop_right = min(width, face_center_x + crop_size // 2)
                crop_top = max(0, face_center_y - crop_size // 2)
                crop_bottom = min(height, face_center_y + crop_size // 2)

                # Slide the window back inside the image when it hit a border.
                if crop_left == 0:
                    crop_right = min(width, crop_size)
                if crop_right == width:
                    crop_left = max(0, width - crop_size)
                if crop_top == 0:
                    crop_bottom = min(height, crop_size)
                if crop_bottom == height:
                    crop_top = max(0, height - crop_size)

                actual_width = crop_right - crop_left
                actual_height = crop_bottom - crop_top

                # Accept only if the realized crop still meets the floor.
                if actual_width >= target_size[0] and actual_height >= target_size[1]:
                    face_area = face_width * face_height
                    crop_area = actual_width * actual_height
                    face_ratio = face_area / crop_area

                    if 0.1 <= face_ratio <= 0.3:
                        cropped = img[int(crop_top):int(crop_bottom),
                                    int(crop_left):int(crop_right)]
                        print(f"裁剪成功: {actual_width}x{actual_height}, 人脸比例: {face_ratio:.1%}")
                        return cropped

            print("无法找到合适的裁剪尺寸")
            return None

        except Exception as e:
            print(f"裁剪图片失败: {e}")
            return None

    @staticmethod
    def _cropped_path(image_path: str) -> str:
        """Derive the crop output path by suffixing the stem.

        The original code used image_path.replace('.', '_cropped.'), which
        rewrote EVERY dot in the path (directory names, './' prefixes) and
        produced broken paths; splitext touches only the extension.
        """
        base, ext = os.path.splitext(image_path)
        return f"{base}_cropped{ext}"

    def check_quality(self, image_path: str, config: ProcessConfig) -> Tuple[bool, List[FilterReason]]:
        """Run the full quality gate on one image.

        Checks resolution, face count, brightness, contrast, face size,
        face centering and sharpness.  When the face is too small or
        off-center, a centered crop is attempted (at most once per file)
        and the crop is re-checked recursively.

        Args:
            image_path: path to the image file.
            config: processing configuration (min_resolution, ...).

        Returns:
            (passed, reasons) — passed is True iff no problem was found;
            reasons lists every FilterReason collected.
        """
        try:
            img = ImageProcessor.imread_with_chinese_path(image_path)
            if img is None:
                print(f"无法读取图片: {image_path}")
                return False, [FilterReason('processing_error', '无法读取图片')]

            filter_reasons: List[FilterReason] = []

            # Resolution gate.
            height, width = img.shape[:2]
            if width < config.min_resolution[0] or height < config.min_resolution[1]:
                filter_reasons.append(FilterReason(
                    'resolution_too_small',
                    f'分辨率过小: {width}x{height}',
                    {'width': width, 'height': height}
                ))

            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

            faces = self.face_detector(gray)
            print(f"检测到 {len(faces)} 个人脸")

            # Exactly one face is required.
            if len(faces) == 0:
                return False, [FilterReason('no_face', '未检测到人脸')]

            if len(faces) > 1:
                print(f"检测到多个人脸: {len(faces)}个")
                return False, [FilterReason(
                    'multiple_faces',
                    f'检测到多个人脸: {len(faces)}个',
                    {'face_count': len(faces)}
                )]

            quality = self.face_detection_params['quality_params']

            # Brightness gate (mean gray level).
            brightness = np.mean(gray)
            if brightness < quality['brightness_min']:
                filter_reasons.append(FilterReason(
                    'bad_brightness',
                    f'亮度过低: {brightness:.1f}',
                    {'brightness': float(brightness)}
                ))
            elif brightness > quality['brightness_max']:
                filter_reasons.append(FilterReason(
                    'bad_brightness',
                    f'亮度过高: {brightness:.1f}',
                    {'brightness': float(brightness)}
                ))

            # Contrast gate (gray-level standard deviation).
            contrast = np.std(gray)
            if contrast < quality['contrast']:
                filter_reasons.append(FilterReason(
                    'low_contrast',
                    f'对比度过低: {contrast:.1f}',
                    {'contrast': float(contrast)}
                ))

            # Face geometry of the single detected face.
            face = faces[0]
            face_width = face.right() - face.left()
            face_height = face.bottom() - face.top()
            face_ratio = (face_width * face_height) / (width * height)

            # Only crop files that are not themselves crops; this breaks the
            # unbounded recursion the old code allowed (…_cropped_cropped…).
            can_crop = not os.path.splitext(
                os.path.basename(image_path))[0].endswith('_cropped')

            # Face-size gate: try to recover a too-small face by cropping.
            if face_ratio < self.face_detection_params['min_face_ratio']:
                cropped = self.crop_around_face(img, face) if can_crop else None
                if cropped is not None:
                    crop_path = self._cropped_path(image_path)
                    # NOTE(review): cv2.imwrite may fail on non-ASCII paths —
                    # consider an ImageProcessor write counterpart.
                    cv2.imwrite(crop_path, cropped)
                    print(f"保存裁剪后的图片: {crop_path}")
                    # Re-run the full gate on the cropped file.
                    return self.check_quality(crop_path, config)
                filter_reasons.append(FilterReason(
                    'face_too_small',
                    f'人脸比例过小: {face_ratio:.1%}',
                    {'face_ratio': face_ratio}
                ))

            # Centering gate: face center in normalized [0, 1] coordinates.
            face_center_x = (face.left() + face.right()) / 2 / width
            face_center_y = (face.top() + face.bottom()) / 2 / height
            center_tolerance = self.face_detection_params['center_tolerance']

            if abs(face_center_x - 0.5) > center_tolerance or \
               abs(face_center_y - 0.5) > center_tolerance:
                cropped = self.crop_around_face(img, face) if can_crop else None
                if cropped is not None:
                    crop_path = self._cropped_path(image_path)
                    cv2.imwrite(crop_path, cropped)
                    print(f"保存裁剪后的图片: {crop_path}")
                    # Re-run the full gate on the cropped file.
                    return self.check_quality(crop_path, config)
                filter_reasons.append(FilterReason(
                    'face_not_centered',
                    f'人脸未居中: ({face_center_x:.2f}, {face_center_y:.2f})',
                    {'face_center_x': face_center_x, 'face_center_y': face_center_y}
                ))

            # Sharpness gate (variance of the Laplacian).
            laplacian = cv2.Laplacian(gray, cv2.CV_64F).var()
            if laplacian < quality['sharpness']:
                filter_reasons.append(FilterReason(
                    'low_sharpness',
                    f'图片不够清晰: {laplacian:.1f}',
                    {'sharpness': float(laplacian)}
                ))

            return (not filter_reasons), filter_reasons

        except Exception as e:
            print(f"质量检查失败: {e}")
            return False, [FilterReason('processing_error', f'处理出错: {str(e)}')]

    def check_face_ratio(self, face_locations, img_shape, config) -> Tuple[bool, Optional[FilterReason]]:
        """Check the face-area / image-area ratio against config bounds.

        Args:
            face_locations: list of (top, right, bottom, left)-style boxes;
                only the largest box is evaluated.
            img_shape: image shape as (height, width, ...).
            config: provides face_ratio_min / face_ratio_max.

        Returns:
            (True, None) when the ratio is acceptable, otherwise
            (False, FilterReason).
        """
        if not face_locations:
            return False, FilterReason('no_face', '未检测到人脸')

        # Evaluate the largest face box only.
        face_area = max(face_locations, key=lambda x: (x[2]-x[0])*(x[3]-x[1]))
        face_height = face_area[2] - face_area[0]
        face_width = face_area[3] - face_area[1]

        # Face-to-image area ratio.
        image_area = img_shape[0] * img_shape[1]
        face_area_size = face_height * face_width
        face_ratio = face_area_size / image_area

        # Images taller than 1.5x their width are likely full-body shots:
        # relax the ratio bounds accordingly.
        is_full_body = img_shape[0] > img_shape[1] * 1.5
        min_ratio = config.face_ratio_min * (0.5 if is_full_body else 1.0)
        max_ratio = config.face_ratio_max * (1.5 if is_full_body else 1.0)

        if face_ratio < min_ratio or face_ratio > max_ratio:
            return False, FilterReason(
                'face_ratio',
                f'人脸比例不合适: {face_ratio:.2%}',
                {'ratio': face_ratio, 'is_full_body': is_full_body}
            )

        return True, None