import asyncio
import logging
import os
import threading
import time
import uuid
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
from functools import wraps, lru_cache
from typing import List, Dict, Union, Optional

import cv2
import numpy as np
from insightface.app import FaceAnalysis
from pymilvus import (
    MilvusClient,
    DataType,
)

def timer(func):
    """Decorator that records the wall-clock duration of each call.

    The elapsed time (in seconds) of the most recent successful call is
    exposed on the wrapper as the ``last_time`` attribute; it starts at
    0 before the first call.
    """
    @wraps(func)
    def timed(*args, **kwargs):
        began = time.perf_counter()
        outcome = func(*args, **kwargs)
        # Stash elapsed seconds so callers can inspect the timing afterwards.
        timed.last_time = time.perf_counter() - began
        return outcome

    timed.last_time = 0
    return timed

class FaceSearcherMilvus:
    def __init__(self,
                 milvus_host: str = "218.57.240.137",
                 milvus_port: str = "19530",
                 collection_name: str = "face_vectors",
                 max_workers: int = 4):
        """Set up the face searcher.

        Args:
            milvus_host: Milvus server address.
            milvus_port: Milvus server port.
            collection_name: Name of the target collection.
            max_workers: Size of the worker thread pool / connection pool.

        Raises:
            ConnectionError: if the Milvus connection cannot be established.
        """
        # Logging setup.
        logging.basicConfig(level=logging.INFO)
        self.logger = logging.getLogger(__name__)

        # InsightFace pipeline. "buffalo_s" is the small model — faster but
        # less accurate than "buffalo_l".
        self.app = FaceAnalysis(
            providers=['CPUExecutionProvider'],
            allowed_modules=['detection', 'recognition'],
            name="buffalo_s",
        )
        # A reduced detection size (320x320 instead of 640x640) trades some
        # recall for speed.
        self.app.prepare(ctx_id=0, det_size=(320, 320), det_thresh=0.5)

        # Apply a recognition threshold when the backend exposes one.
        if hasattr(self.app, 'set_recognition_threshold'):
            self.app.set_recognition_threshold(0.5)

        # Connection retry policy used by _init_connections.
        self.max_retries = 3
        self.retry_delay = 1  # seconds

        try:
            self._init_connections(milvus_host, milvus_port, collection_name, max_workers)
        except Exception as e:
            self.logger.error(f"Milvus初始化失败: {str(e)}")
            raise ConnectionError(f"无法初始化Milvus: {str(e)}")

        # Per-instance memoization of feature extraction, keyed on image path.
        self._extract_face_embedding = lru_cache(maxsize=1000)(self._extract_face_embedding)

        # Timing counters; individual stages are filled in by the
        # processing methods as they run.
        self.performance_stats = {
            'preprocess': 0,
            'face_detection': 0,
            'feature_extraction': 0,
            'vector_search': 0,
            'total': 0,
        }
    
    def _init_connections(self, milvus_host, milvus_port, collection_name, max_workers):
        """Initialize the thread pool and the Milvus connection pool.

        Retries up to ``self.max_retries`` times, sleeping
        ``self.retry_delay`` seconds between attempts, and re-raises the
        last error when every attempt fails.

        Args:
            milvus_host: Milvus server address.
            milvus_port: Milvus server port.
            collection_name: Collection to check/create and load.
            max_workers: Number of pooled connections (one per worker).
        """
        self.collection_name = collection_name

        # Create the thread pool once, outside the retry loop: the previous
        # version rebuilt it on every attempt, leaking the executors of
        # failed attempts.
        if not hasattr(self, 'executor'):
            self.executor = ThreadPoolExecutor(max_workers=max_workers)

        uri = f"http://{milvus_host}:{milvus_port}"
        for attempt in range(self.max_retries):
            pool = []
            try:
                # First connection checks for / creates the collection.
                client = MilvusClient(uri=uri, timeout=30)
                if not client.has_collection(collection_name):
                    self._create_collection(client)
                client.load_collection(collection_name)
                pool.append(client)

                # Remaining connections for the other workers.
                for _ in range(max_workers - 1):
                    pool.append(MilvusClient(uri=uri, timeout=30))

                # Publish the pool only after every connection succeeded.
                self.client_pool = pool
                self.logger.info("Milvus连接成功，集合已加载")
                break

            except Exception:
                # Close any half-built connections before retrying so
                # failed attempts do not leak sockets.
                for c in pool:
                    try:
                        c.close()
                    except Exception:
                        pass
                if attempt < self.max_retries - 1:
                    self.logger.warning(f"连接失败，尝试重连 ({attempt + 1}/{self.max_retries})")
                    time.sleep(self.retry_delay)
                else:
                    raise

    def get_client(self):
        """Return a Milvus client for the calling thread.

        The current thread's ident is hashed onto a pool slot, so each
        thread consistently reuses the same pooled connection.
        """
        slot = hash(threading.current_thread().ident) % len(self.client_pool)
        return self.client_pool[slot]

    async def search_similar_faces_async(self,
                                       query_image: str,
                                       threshold: float = 0.6,
                                       top_k: int = 5):
        """Asynchronous wrapper around :meth:`search_similar_faces`.

        Runs the blocking search on the instance's thread pool so the
        event loop stays responsive.

        Args:
            query_image: Path of the query image.
            threshold: Minimum similarity for a hit to be kept.
            top_k: Maximum number of results.

        Returns:
            Whatever :meth:`search_similar_faces` returns.
        """
        # get_running_loop() is the supported way to obtain the loop from
        # inside a coroutine; get_event_loop() is deprecated here since
        # Python 3.10.
        loop = asyncio.get_running_loop()
        return await loop.run_in_executor(
            self.executor,
            self.search_similar_faces,
            query_image,
            threshold,
            top_k,
        )

    def _create_collection(self, client):
        """Create the face-vector collection with its schema and indexes.

        Raises:
            Exception: re-raises any Milvus error after logging it.
        """
        try:
            schema = client.create_schema(
                auto_id=False,
                enable_dynamic_fields=True,
                description="人脸特征向量集合"
            )

            # Field layout: string primary key, owner id, 512-d embedding,
            # and a free-form JSON metadata blob.
            field_specs = [
                dict(field_name="face_id", datatype=DataType.VARCHAR,
                     is_primary=True, max_length=100),
                dict(field_name="person_id", datatype=DataType.VARCHAR,
                     max_length=100),
                dict(field_name="embedding", datatype=DataType.FLOAT_VECTOR,
                     dim=512),
                dict(field_name="metadata", datatype=DataType.JSON),
            ]
            for spec in field_specs:
                schema.add_field(**spec)

            client.create_collection(
                collection_name=self.collection_name,
                schema=schema,
                consistency_level="Strong"
            )

            # Scalar index on the key plus an IVF_FLAT cosine index on the
            # embedding vectors.
            index_params = client.prepare_index_params()
            index_params.add_index(field_name="face_id")
            index_params.add_index(
                field_name="embedding",
                index_type="IVF_FLAT",
                metric_type="COSINE",
                params={"nlist": 1024}
            )
            client.create_index(
                collection_name=self.collection_name,
                index_params=index_params
            )

            self.logger.info(f"创建集合成功: {self.collection_name}")

        except Exception as e:
            self.logger.error(f"创建集合失败: {str(e)}")
            raise

    @timer
    def _preprocess_image(self, img: np.ndarray) -> np.ndarray:
        """Normalize input size and contrast before face detection.

        Images with a side under 100 px are upscaled, images with a side
        over 1920 px are downscaled, and CLAHE is applied to the L
        channel to even out lighting. On any failure the (possibly
        already resized) image is returned as-is.
        """
        try:
            h, w = img.shape[:2]

            # Resize extremes: tiny inputs up, huge inputs down.
            if w < 100 or h < 100:
                factor = max(100 / w, 100 / h)
                img = cv2.resize(img, None, fx=factor, fy=factor)
            elif w > 1920 or h > 1920:
                factor = min(1920 / w, 1920 / h)
                img = cv2.resize(img, None, fx=factor, fy=factor)

            # Contrast-limited adaptive histogram equalization on the
            # lightness channel only, to avoid shifting colors.
            lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
            lightness, chan_a, chan_b = cv2.split(lab)
            equalizer = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))
            lightness = equalizer.apply(lightness)
            img = cv2.cvtColor(cv2.merge((lightness, chan_a, chan_b)),
                               cv2.COLOR_LAB2BGR)

            return img
        except Exception as e:
            self.logger.warning(f"图像预处理失败: {str(e)}")
            return img

    def _check_face_quality(self, face, img) -> bool:
        """Heuristic quality gate for a detected face.

        Rejects faces that are too small relative to the frame, too
        tilted (by the eye-line angle), or too blurry (by Laplacian
        variance of the crop). Any error during the checks logs a
        warning and accepts the face.
        """
        try:
            x1, y1, x2, y2 = face.bbox[0], face.bbox[1], face.bbox[2], face.bbox[3]

            # Relative size: reject faces covering under 1% of the frame.
            frame_area = img.shape[0] * img.shape[1]
            if (x2 - x1) * (y2 - y1) / frame_area < 0.01:
                return False

            # Tilt: angle of the line through both eyes (keypoints 0 and 1).
            if hasattr(face, 'kps'):
                pts = face.kps
                tilt = np.abs(np.arctan2(pts[1][1] - pts[0][1],
                                         pts[1][0] - pts[0][0]))
                if tilt > 0.3:  # roughly 17 degrees
                    return False

            # Sharpness: variance of the Laplacian over the face crop.
            crop = img[int(y1):int(y2), int(x1):int(x2)]
            if crop.size > 0:
                sharpness = cv2.Laplacian(cv2.cvtColor(crop, cv2.COLOR_BGR2GRAY),
                                          cv2.CV_64F).var()
                if sharpness < 100:
                    return False

            return True
        except Exception as e:
            self.logger.warning(f"人脸质量检查失败: {str(e)}")
            return True  # fail open: accept the face when the checks error out

    @timer
    def _extract_face_embedding(self, image_path: str) -> Optional[np.ndarray]:
        """Read an image and return the embedding of its largest face.

        Returns None when the file cannot be read, no face is detected,
        or extraction fails. Detection time is recorded in
        ``self.performance_stats['face_detection']``.
        """
        try:
            # Load the image from disk.
            img = cv2.imread(image_path)
            if img is None:
                self.logger.error(f"无法读取图片: {image_path}")
                return None

            # Cap the longer side at 640 px to keep detection fast.
            h, w = img.shape[:2]
            longest = max(w, h)
            if longest > 640:
                ratio = 640 / longest
                img = cv2.resize(img, None, fx=ratio, fy=ratio)

            # Detect faces and record how long detection took.
            detect_start = time.perf_counter()
            faces = self.app.get(img)
            detect_end = time.perf_counter()
            self.logger.info(f"人脸检测耗时: {(detect_end-detect_start)*1000:.2f}ms")
            self.performance_stats['face_detection'] = detect_end - detect_start

            if not faces:
                self.logger.warning(f"未检测到人脸: {image_path}")
                return None

            # With multiple detections, keep the one with the largest box.
            def box_area(f):
                return (f.bbox[2] - f.bbox[0]) * (f.bbox[3] - f.bbox[1])

            chosen = max(faces, key=box_area) if len(faces) > 1 else faces[0]

            return chosen.embedding

        except Exception as e:
            self.logger.error(f"特征提取失败: {str(e)}")
            return None

    def add_face(self, image_path: str, person_id: str, metadata: Dict = None) -> bool:
        """
        Add one face to the database.

        Args:
            image_path: Path of the image file.
            person_id: Identifier of the person.
            metadata: Optional extra metadata stored alongside the vector.

        Returns:
            bool: True when the face was inserted successfully.
        """
        embedding = self._extract_face_embedding(image_path)
        if embedding is None:
            return False

        try:
            # Primary key: timestamp plus a random uuid suffix. The
            # previous timestamp-only id could collide when faces are
            # added concurrently (this class hands work to a thread pool
            # and exposes an async entry point).
            face_id = f"face_{datetime.now().timestamp()}_{uuid.uuid4().hex[:8]}"
            entity = {
                "face_id": face_id,
                "person_id": person_id,
                "embedding": embedding.tolist(),
                "metadata": metadata or {}
            }

            result = self.get_client().insert(
                collection_name=self.collection_name,
                data=[entity]
            )

            self.logger.info(f"成功添加人脸: {person_id}, 插入数量: {result['insert_count']}")
            return True

        except Exception as e:
            self.logger.error(f"添加人脸失败: {str(e)}")
            return False
    
    def batch_add_faces(self, image_dir: str, person_id: str, metadata: Dict = None) -> int:
        """
        Add every face image found in a directory (non-recursive).

        Args:
            image_dir: Directory to scan for .jpg/.jpeg/.png files.
            person_id: Identifier of the person.
            metadata: Optional extra metadata applied to each face.

        Returns:
            int: Number of images that were added successfully.
        """
        image_exts = ('.jpg', '.jpeg', '.png')
        candidates = (
            os.path.join(image_dir, name)
            for name in os.listdir(image_dir)
            if name.lower().endswith(image_exts)
        )
        # Count the images for which insertion succeeded.
        return sum(1 for path in candidates if self.add_face(path, person_id, metadata))
    
    def delete_person(self, person_id: str) -> int:
        """
        Delete every face record belonging to a person.

        Args:
            person_id: Identifier of the person.

        Returns:
            int: 1 when the delete request was issued successfully,
            0 on failure. (The filter-based delete used here does not
            report how many rows actually matched.)
        """
        try:
            # Escape backslashes and double quotes so a crafted person_id
            # cannot break out of the string literal in the filter
            # expression (filter-injection hardening).
            safe_id = person_id.replace('\\', '\\\\').replace('"', '\\"')
            expr = f'person_id == "{safe_id}"'
            self.get_client().delete(
                collection_name=self.collection_name,
                filter=expr
            )
            return 1
        except Exception as e:
            self.logger.error(f"删除人脸失败: {str(e)}")
            return 0
    
    def __del__(self):
        """Release the thread pool and Milvus resources on destruction.

        Defensive by design: __del__ may run on a partially initialized
        instance (e.g. when __init__ raised early) or during interpreter
        shutdown, so every attribute access is guarded — including
        ``self.logger``, which the previous version read unconditionally.
        """
        # self.logger may not exist if __init__ failed before assigning it.
        logger = getattr(self, 'logger', None) or logging.getLogger(__name__)
        try:
            # Shut the thread pool down first, without waiting on tasks.
            if hasattr(self, 'executor'):
                logger.info("正在关闭线程池...")
                self.executor.shutdown(wait=False)

            # Release the loaded collection on every pooled client.
            if hasattr(self, 'client_pool'):
                logger.info("正在释放 Milvus 资源...")
                for client in self.client_pool:
                    try:
                        client.release_collection(self.collection_name)
                    except Exception as e:
                        logger.warning(f"释放集合失败: {str(e)}")
                self.client_pool.clear()

        except Exception as e:
            logger.error(f"清理资源失败: {str(e)}")
        finally:
            logger.info("资源清理完成")

    def cleanup(self):
        """Explicitly release resources without waiting for garbage collection.

        Simply delegates to ``__del__``, whose steps are all
        attribute-guarded, so an extra later call is harmless.
        """
        self.__del__()

    @timer
    def search_similar_faces(self,
                           query_image: str,
                           threshold: float = 0.6,
                           top_k: int = 5) -> Optional[Dict]:
        """Search the collection for faces similar to the query image.

        Args:
            query_image: Path of the query image.
            threshold: Minimum cosine similarity for a hit to be kept.
            top_k: Maximum number of hits returned.

        Returns:
            A dict with "results" (list of matches carrying person_id,
            similarity, metadata and confidence) and "performance"
            (per-stage timings in ms), or None on failure. Note the
            previous annotation (Optional[List[Dict]]) did not match
            this actual return shape.
        """
        start_total = time.perf_counter()
        timings = {}

        try:
            # 1. Feature extraction (includes image load and preprocessing).
            t1 = time.perf_counter()
            embedding = self._extract_face_embedding(query_image)
            t2 = time.perf_counter()
            timings['face_extraction'] = round((t2 - t1) * 1000, 2)
            self.logger.info(f"人脸特征提取耗时: {timings['face_extraction']}ms")

            if embedding is None:
                return None

            # 2. Search parameters: scale nprobe with top_k, capped at 50.
            t3 = time.perf_counter()
            search_params = {
                "metric_type": "COSINE",
                "params": {"nprobe": min(top_k * 10, 50)}
            }
            t4 = time.perf_counter()
            timings['params_preparation'] = round((t4 - t3) * 1000, 2)

            # 3. Vector search; over-fetch (top_k * 2) before thresholding.
            t5 = time.perf_counter()
            results = self.get_client().search(
                collection_name=self.collection_name,
                data=[embedding.tolist()],
                anns_field="embedding",
                search_params=search_params,
                limit=top_k * 2,
                output_fields=["person_id", "metadata"]
            )
            t6 = time.perf_counter()
            timings['vector_search'] = round((t6 - t5) * 1000, 2)
            self.logger.info(f"向量搜索耗时: {timings['vector_search']}ms")

            # 4. Filter by threshold and keep the best top_k.
            t7 = time.perf_counter()
            similar_faces = []
            # Loop variable renamed from `result` — the old name shadowed
            # the final result assigned below.
            for hits in results:
                for hit in hits:
                    score = hit["distance"]
                    entity = hit["entity"]

                    if score >= threshold:
                        similar_faces.append({
                            "person_id": entity.get("person_id"),
                            "similarity": float(score),
                            "metadata": entity.get("metadata"),
                            "confidence": self._calculate_confidence(score)
                        })

            similar_faces.sort(key=lambda x: x["similarity"], reverse=True)
            top_matches = similar_faces[:top_k]
            t8 = time.perf_counter()
            timings['result_processing'] = round((t8 - t7) * 1000, 2)

            # 5. Total time plus a structured log of every stage.
            timings['total'] = round((time.perf_counter() - start_total) * 1000, 2)

            self.logger.info("搜索性能统计:")
            self.logger.info(f"├── 人脸特征提取: {timings['face_extraction']}ms")
            self.logger.info(f"├── 参数准备: {timings['params_preparation']}ms")
            self.logger.info(f"├── 向量搜索: {timings['vector_search']}ms")
            self.logger.info(f"├── 结果处理: {timings['result_processing']}ms")
            self.logger.info(f"└── 总耗时: {timings['total']}ms")

            return {
                "results": top_matches,
                "performance": {
                    "total_time_ms": timings['total'],
                    "face_extraction_ms": timings['face_extraction'],
                    "params_preparation_ms": timings['params_preparation'],
                    "vector_search_ms": timings['vector_search'],
                    "result_processing_ms": timings['result_processing'],
                    # Legacy counters kept for backward compatibility.
                    "preprocess_time_ms": round(self.performance_stats['preprocess'] * 1000, 2),
                    "face_detection_time_ms": round(self.performance_stats['face_detection'] * 1000, 2),
                    "feature_extraction_time_ms": round(self.performance_stats['feature_extraction'] * 1000, 2)
                }
            }

        except Exception as e:
            self.logger.error(f"搜索人脸失败: {str(e)}")
            self.logger.error(f"结果格式: {results if 'results' in locals() else 'N/A'}")
            return None
    
    def _calculate_confidence(self, similarity: float) -> float:
        """Map a raw similarity score onto a coarse confidence value.

        Step function: >=0.9 -> 1.0, >=0.8 -> 0.9, >=0.7 -> 0.8,
        >=0.6 -> 0.7, anything lower -> 0.4.
        """
        steps = (
            (0.9, 1.0),
            (0.8, 0.9),
            (0.7, 0.8),
            (0.6, 0.7),
        )
        for cutoff, confidence in steps:
            if similarity >= cutoff:
                return confidence
        return 0.4