import logging
import math
import os
import time
from dataclasses import dataclass, field
from datetime import datetime
from typing import Tuple, List

import cv2
import face_recognition
import numpy as np
import torch
import torchvision.transforms as T
from PIL import Image
from piq import brisque

@dataclass
class FilterReason:
    """A single reason why an image was rejected by the filter pipeline."""
    code: str     # machine-readable reason code, e.g. 'no_face', 'blur'
    message: str  # human-readable description (may embed metric values)

@dataclass
class ImageAnalysis:
    """Per-image quality metrics; every score is on a 0-100 scale."""
    laplacian_score: float = 0.0   # sharpness via Laplacian variance
    contrast_score: float = 0.0    # grey-level standard deviation, normalised
    brightness_score: float = 0.0  # closeness of mean brightness to mid-grey
    quality_score: float = 0.0     # weighted combination of the above
    blur_score: float = 0.0        # alias of laplacian_score (kept for callers)

@dataclass
class ProcessResult:
    """Outcome of processing one image through the filter pipeline."""
    status: str = 'wait'  # one of: wait, processing, success, failed
    processed_path: str = ''
    # default_factory avoids the shared-mutable-default pitfall; the original
    # used None + __post_init__ for the same effect.
    filter_reasons: List[FilterReason] = field(default_factory=list)
    analysis: ImageAnalysis = field(default_factory=ImageAnalysis)
    face_count: int = 0
    face_ratio: float = 0.0

    def __post_init__(self):
        # Kept for backward compatibility: callers that explicitly pass None
        # still get fresh default objects, exactly as before.
        if self.filter_reasons is None:
            self.filter_reasons = []
        if self.analysis is None:
            self.analysis = ImageAnalysis()

class ImageProcessor:
    """Image quality assessment, face analysis and face-centred cropping.

    All disk reads go through :meth:`imread_with_chinese_path` because
    ``cv2.imread`` cannot open non-ASCII (e.g. Chinese) paths on Windows;
    :meth:`crop_image` writes through ``cv2.imencode`` for the same reason.
    """

    def __init__(self):
        # Tensor transform/device kept for tensor-based metrics (e.g. piq.brisque).
        self.transform = T.Compose([T.ToTensor()])
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.logger = logging.getLogger(__name__)

    def check_quality(self, image_path: str, threshold: float) -> bool:
        """Return True when the image's combined quality score passes *threshold*.

        The score combines multi-scale sharpness (Laplacian + Sobel), brightness
        and contrast; the threshold is relaxed for sub-full-HD images.
        Returns False on any read or processing error.
        """
        try:
            img = ImageProcessor.imread_with_chinese_path(image_path)
            if img is None:
                self.logger.error(f"无法读取图片: {image_path}")
                return False

            # Original resolution, used later for the resolution factor.
            height, width = img.shape[:2]

            # Downscale so the longest side is at most 1024 px; never upscale.
            target_size = 1024
            scale = min(target_size / width, target_size / height)
            if scale < 1:
                new_width = int(width * scale)
                new_height = int(height * scale)
                img = cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_AREA)

            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

            # Multi-scale sharpness analysis over an image pyramid.
            blur_scores = []
            pyramid_scales = [1.0, 0.5, 0.25]

            # Loop variable renamed from `scale` to avoid shadowing the resize
            # scale computed above.
            for pyr_scale in pyramid_scales:
                if pyr_scale != 1.0:
                    scaled_gray = cv2.resize(gray, None, fx=pyr_scale, fy=pyr_scale, interpolation=cv2.INTER_AREA)
                else:
                    scaled_gray = gray

                # Laplacian variance: classic focus measure.
                laplacian_var = cv2.Laplacian(scaled_gray, cv2.CV_64F).var()

                # Sobel gradient-energy variance as a second focus measure.
                sobelx = cv2.Sobel(scaled_gray, cv2.CV_64F, 1, 0, ksize=3)
                sobely = cv2.Sobel(scaled_gray, cv2.CV_64F, 0, 1, ksize=3)
                sobel_var = (sobelx ** 2 + sobely ** 2).var()

                # Combine both measures; the 1/scale^2 factor compensates for
                # the reduced pixel count at smaller pyramid levels.
                blur_score = (laplacian_var + sobel_var * 0.5) / (pyr_scale * pyr_scale)
                blur_scores.append(blur_score)

            # Weighted average across pyramid levels (finest level dominates).
            weights = [0.5, 0.3, 0.2]
            final_blur_score = sum(score * weight for score, weight in zip(blur_scores, weights))

            # Relax the threshold for images below full-HD (1920x1080) resolution.
            resolution_factor = min(1.0, math.sqrt((width * height) / (1920 * 1080)))
            adjusted_threshold = threshold * resolution_factor

            # Brightness score: 1.0 at mid-grey (128), falling towards 0/255.
            brightness = cv2.mean(gray)[0]
            brightness_score = 1.0 - abs(128 - brightness) / 128.0

            # Contrast score: grey-level standard deviation, saturated at 80.
            contrast = gray.std()
            contrast_score = min(contrast / 80.0, 1.0)

            # Overall: sharpness 50%, brightness 25%, contrast 25%.
            quality_score = (
                (final_blur_score / 1000.0) * 0.5 +
                brightness_score * 0.25 +
                contrast_score * 0.25
            )

            self.logger.info(f"""
                图片质量评估结果:
                - 分辨率: {width}x{height}
                - 清晰度得分: {final_blur_score:.2f}
                - 亮度得分: {brightness_score:.2f}
                - 对比度得分: {contrast_score:.2f}
                - 最终得分: {quality_score:.2f}
                - 调整后阈值: {adjusted_threshold:.2f}
            """)

            return quality_score >= adjusted_threshold

        except Exception as e:
            self.logger.error(f"检查图片质量时出错: {e}", exc_info=True)
            return False

    def analyze_face(self, img_path):
        """Analyse the largest face in the image at *img_path*.

        Returns a dict with 'face_ratio', 'yaw', 'pitch' and 'blur', or None
        when no face/landmarks are found or an error occurs.
        """
        try:
            image = face_recognition.load_image_file(img_path)
            face_locations = face_recognition.face_locations(image)
            face_landmarks = face_recognition.face_landmarks(image)

            if not face_locations or not face_landmarks:
                return None

            # Pick the largest face. Locations are (top, right, bottom, left),
            # so the area is (bottom - top) * (right - left).
            # BUGFIX: the original key used (x[3] - x[1]) = left - right, which
            # is negative and made max() select the *smallest* face.
            face_area = max(face_locations, key=lambda x: (x[2] - x[0]) * (x[1] - x[3]))
            top, right, bottom, left = face_area

            # Face area as a fraction of the whole image.
            img_height, img_width = image.shape[:2]
            face_area_size = (bottom - top) * (right - left)
            img_area = img_height * img_width
            face_ratio = face_area_size / img_area

            # Landmarks of the first detected face. NOTE(review): this may not
            # correspond to the largest face selected above — confirm whether
            # face_landmarks() order matches face_locations().
            landmarks = face_landmarks[0]

            return {
                'face_ratio': face_ratio,
                'yaw': self._calc_yaw(landmarks),
                'pitch': self._calc_pitch(landmarks),
                'blur': self._calc_face_blur(img_path, (left, top, right, bottom))
            }
        except Exception as e:
            print(f"人脸分析错误: {str(e)}")
            return None

    def _calc_yaw(self, landmarks):
        """Approximate yaw (degrees) from the horizontal displacement of the
        nose tip relative to the eye midpoint."""
        left_eye = np.mean(landmarks['left_eye'], axis=0)
        right_eye = np.mean(landmarks['right_eye'], axis=0)
        nose_tip = landmarks['nose_tip'][0]

        eye_center = (left_eye + right_eye) / 2
        eye_distance = np.linalg.norm(right_eye - left_eye)
        nose_displacement = nose_tip[0] - eye_center[0]

        # Simplified yaw: angle of the nose offset over the inter-eye distance.
        yaw = np.arctan2(nose_displacement, eye_distance) * 180 / np.pi
        return yaw

    def _calc_pitch(self, landmarks):
        """Approximate pitch (degrees) from the eye-midpoint-to-chin vector;
        0 corresponds to a vertical (frontal) eye-to-chin line."""
        left_eye = np.mean(landmarks['left_eye'], axis=0)
        right_eye = np.mean(landmarks['right_eye'], axis=0)
        chin = landmarks['chin'][0]

        eye_center = (left_eye + right_eye) / 2
        vertical_distance = chin[1] - eye_center[1]
        horizontal_distance = abs(chin[0] - eye_center[0])

        # Simplified pitch, shifted by -90 so a vertical chin line gives 0.
        pitch = np.arctan2(vertical_distance, horizontal_distance) * 180 / np.pi - 90
        return pitch

    def _calc_face_blur(self, img_path, face_area):
        """Laplacian-variance sharpness of the face region.

        *face_area* is (left, top, right, bottom) in pixel coordinates of the
        full-size image. Raises on read failure; callers catch and fall back.
        """
        img = self.imread_with_chinese_path(img_path)
        left, top, right, bottom = face_area
        face_region = img[top:bottom, left:right]
        gray = cv2.cvtColor(face_region, cv2.COLOR_BGR2GRAY)
        return cv2.Laplacian(gray, cv2.CV_64F).var()

    def crop_image(self, image_path: str, output_path: str = None) -> Tuple[bool, str]:
        """Crop a 2048x2048 region centred on the first detected face.

        Returns (True, output_path) on success or (False, error_message) on
        failure. Crops that would extend past the border are shifted inward
        and, if the image is smaller than 2048px, zero-padded to size.
        """
        try:
            img = self.imread_with_chinese_path(image_path)
            if img is None:
                return False, "无法读取图片"

            # BUGFIX: cv2 loads BGR but face_recognition expects RGB; the
            # original passed the BGR array directly to the detector.
            rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            face_locations = face_recognition.face_locations(rgb)
            if not face_locations:
                return False, "未检测到人脸"

            # Centre of the first detected face; locations are (top, right, bottom, left).
            top, right, bottom, left = face_locations[0]
            face_center_x = (left + right) // 2
            face_center_y = (top + bottom) // 2

            crop_size = 2048
            half_size = crop_size // 2

            # Clamp the crop window to the image bounds.
            start_x = max(0, face_center_x - half_size)
            start_y = max(0, face_center_y - half_size)
            end_x = min(img.shape[1], start_x + crop_size)
            end_y = min(img.shape[0], start_y + crop_size)

            # Shift the window back so it keeps the full crop size if possible.
            if end_x - start_x < crop_size:
                start_x = max(0, end_x - crop_size)
            if end_y - start_y < crop_size:
                start_y = max(0, end_y - crop_size)

            cropped = img[start_y:end_y, start_x:end_x]

            # Pad with black when the source image is smaller than the crop.
            if cropped.shape[0] < crop_size or cropped.shape[1] < crop_size:
                new_img = np.zeros((crop_size, crop_size, 3), dtype=np.uint8)
                y_offset = (crop_size - cropped.shape[0]) // 2
                x_offset = (crop_size - cropped.shape[1]) // 2
                new_img[y_offset:y_offset + cropped.shape[0],
                        x_offset:x_offset + cropped.shape[1]] = cropped
                cropped = new_img

            if output_path is None:
                filename = os.path.basename(image_path)
                base, ext = os.path.splitext(filename)
                output_path = os.path.join(os.path.dirname(image_path), f"{base}_cropped{ext}")

            # BUGFIX: cv2.imwrite silently fails on non-ASCII paths — the very
            # case imread_with_chinese_path exists for. Encode in memory and
            # write the buffer ourselves, mirroring the read path.
            out_ext = os.path.splitext(output_path)[1] or '.jpg'
            ok, buffer = cv2.imencode(out_ext, cropped)
            if not ok:
                return False, "图片编码失败"
            buffer.tofile(output_path)
            return True, output_path

        except Exception as e:
            return False, str(e)

    def process_image(self, image_path: str) -> ProcessResult:
        """Run the full pipeline on one image: load, face detection, face
        ratio, quality analysis, then the filter rules in order.

        Returns a ProcessResult; on rejection its filter_reasons carries
        exactly one FilterReason explaining the first failed rule.
        """
        try:
            result = ProcessResult()

            img = self.imread_with_chinese_path(image_path)
            if img is None:
                result.filter_reasons.append(
                    FilterReason(code='read_error', message='无法读取图片')
                )
                return result

            face_count = self.detect_faces(img)
            result.face_count = face_count

            if face_count == 0:
                result.filter_reasons.append(
                    FilterReason(code='no_face', message='未检测到人脸')
                )
                return result

            face_ratio = self.calculate_face_ratio(img)
            result.face_ratio = face_ratio

            analysis = self.analyze_image(img)
            result.analysis = analysis

            # Filter rules below: the first failure short-circuits.
            if face_count > 1:
                result.filter_reasons.append(
                    FilterReason(code='multi_face',
                               message=f'检测到多个人脸: {face_count}')
                )
                return result

            # Face must cover at least 2% of the frame.
            if face_ratio < 0.02:
                result.filter_reasons.append(
                    FilterReason(code='small_face',
                               message=f'人脸占比过小: {face_ratio:.2%}')
                )
                return result

            # Sharpness must score at least 40/100.
            if analysis.blur_score < 40:
                result.filter_reasons.append(
                    FilterReason(code='blur',
                               message=f'图片模糊: {analysis.blur_score:.1f}分')
                )
                return result

            # Contrast must score at least 50/100.
            if analysis.contrast_score < 50:
                result.filter_reasons.append(
                    FilterReason(code='contrast',
                               message=f'对比度不足: {analysis.contrast_score:.1f}分')
                )
                return result

            # Brightness must score at least 30/100.
            if analysis.brightness_score < 30:
                result.filter_reasons.append(
                    FilterReason(code='brightness',
                               message=f'亮度不足: {analysis.brightness_score:.1f}分')
                )
                return result

            # All checks passed.
            result.status = 'success'
            result.processed_path = image_path
            return result

        except Exception as e:
            self.logger.error(f"处理图片失败: {e}", exc_info=True)
            result = ProcessResult()
            result.filter_reasons.append(
                FilterReason(code='process_error', message=f'处理失败: {str(e)}')
            )
            return result

    def analyze_image(self, image) -> ImageAnalysis:
        """Score sharpness, contrast and brightness of a BGR image on a
        0-100 scale and combine them into quality_score.

        On error, logs and returns whatever was filled so far (defaults 0.0).
        """
        analysis = ImageAnalysis()

        try:
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

            # Sharpness: Laplacian variance, normalised so 500 maps to 100.
            laplacian = cv2.Laplacian(gray, cv2.CV_64F).var()
            analysis.laplacian_score = min(laplacian / 500.0 * 100, 100)
            analysis.blur_score = analysis.laplacian_score

            # Contrast: grey-level std, normalised so 80 maps to 100.
            contrast = gray.std()
            analysis.contrast_score = min(contrast / 80.0 * 100, 100)

            # Brightness: 100 at mid-grey (128), 0 at pure black/white.
            brightness = np.mean(gray)
            analysis.brightness_score = (1.0 - abs(128 - brightness) / 128.0) * 100

            # Weighted overall: sharpness 40%, contrast 30%, brightness 30%.
            analysis.quality_score = (
                analysis.laplacian_score * 0.4 +
                analysis.contrast_score * 0.3 +
                analysis.brightness_score * 0.3
            )

            return analysis

        except Exception as e:
            self.logger.error(f"分析图片失败: {e}", exc_info=True)
            return analysis

    def _detect_scaled_faces(self, image):
        """Shared detection path for detect_faces / calculate_face_ratio.

        Downscales *image* (BGR) so its longest side is at most 1024 px, runs
        the fast HOG detector with no upsampling, and returns
        (face_locations, scaled_height_width).
        """
        height, width = image.shape[:2]
        max_dimension = 1024
        if max(height, width) > max_dimension:
            scale = max_dimension / max(height, width)
            new_width = int(width * scale)
            new_height = int(height * scale)
            image = cv2.resize(image, (new_width, new_height))

        # face_recognition expects RGB.
        rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        # HOG model with no upsampling: the fastest detector configuration.
        face_locations = face_recognition.face_locations(
            rgb_image,
            model="hog",
            number_of_times_to_upsample=0
        )
        return face_locations, image.shape[:2]

    def detect_faces(self, image) -> int:
        """Return the number of faces in *image* (BGR ndarray); 0 on error.

        Caches the last detection result on the instance for other consumers.
        """
        try:
            face_locations, scaled_shape = self._detect_scaled_faces(image)

            # Cache for later consumers of the most recent detection.
            self._last_face_locations = face_locations
            self._last_image_shape = scaled_shape

            return len(face_locations)

        except Exception as e:
            self.logger.error(f"人脸检测失败: {e}", exc_info=True)
            return 0

    def calculate_face_ratio(self, image) -> float:
        """Return total face area / image area; 0.0 when no face or on error.

        The ratio is computed on the downscaled image, but since both face
        boxes and the frame shrink together the ratio is scale-invariant.
        """
        try:
            face_locations, (scaled_h, scaled_w) = self._detect_scaled_faces(image)

            if not face_locations:
                return 0.0

            # Sum the areas of every detected face box.
            total_face_area = sum(
                (bottom - top) * (right - left)
                for top, right, bottom, left in face_locations
            )

            return total_face_area / (scaled_h * scaled_w)

        except Exception as e:
            self.logger.error(f"计算人脸占比失败: {e}", exc_info=True)
            return 0.0

    @staticmethod
    def imread_with_chinese_path(image_path):
        """Read an image from a path that may contain non-ASCII characters.

        cv2.imread cannot open such paths on Windows, so the file is read as
        raw bytes and decoded with cv2.imdecode. Returns a BGR ndarray, or
        None when decoding fails.
        """
        with open(image_path, "rb") as f:
            image_data = f.read()
        image_array = np.frombuffer(image_data, dtype=np.uint8)
        image = cv2.imdecode(image_array, cv2.IMREAD_COLOR)
        return image