import base64
import json
import os
import traceback
from typing import Any, Dict, List, Optional, Tuple

import cv2
import numpy as np
import onnx
from insightface.app import FaceAnalysis

from configs.global_config import global_config
from entity.face_types import FaceFeature
from utils.logger_config import get_logger_config


class FaceFeatureExtractor:
    """Face detection / feature extraction wrapper around insightface's FaceAnalysis.

    Loads a detection + recognition model bundle (default ``buffalo_l``) and exposes
    helpers to extract embeddings, landmarks and attributes from image files or
    in-memory arrays, plus standalone bbox / landmark visualization utilities.
    """

    def __init__(
        self,
        model_name: str = "buffalo_l",
        ctx_id: Optional[int] = None,
        det_size: Tuple[int, int] = (640, 640),
        det_thresh: float = 0.6,
    ) -> None:
        """Initialize the face feature extractor.

        Args:
            model_name: insightface model bundle name.
            ctx_id: device id (insightface convention); when None it is read from
                the ``insightface.ctx_id`` config entry (default 0).
            det_size: detector input size.
            det_thresh: baseline face-detection confidence threshold (default 0.6).

        Raises:
            onnx ValidationError: when the ONNX model file fails validation.
            Exception: any other model-loading failure is logged and re-raised.
        """
        self.logger = get_logger_config(name="face_feature_extractor").get_logger()
        try:
            # Resolve the device id from config when not passed explicitly.
            if ctx_id is None:
                self.ctx_id = global_config.get("insightface.ctx_id", 0)
            else:
                self.ctx_id = ctx_id

            # Optional local model directory, used as insightface's ``root``.
            local_model_path = global_config.get("insightface.local_model_path", None)

            self.model_name = model_name
            # Baseline threshold; temporary per-call overrides restore to this.
            self.det_thresh = det_thresh
            self.det_size = det_size

            # Prefer the configured local model path when it exists on disk.
            if local_model_path and os.path.exists(local_model_path):
                self.logger.info(f"使用本地 InsightFace 模型路径: {local_model_path}")
                self.app = FaceAnalysis(name=model_name, root=local_model_path)
            else:
                self.logger.info(f"使用默认 InsightFace 模型: {model_name}")
                self.app = FaceAnalysis(name=model_name)

            # Bind device id, detection threshold and detector input size.
            self.app.prepare(
                ctx_id=self.ctx_id, det_thresh=det_thresh, det_size=det_size
            )
        except onnx.onnx_cpp2py_export.checker.ValidationError as e:
            self.logger.error("ONNX 模型验证失败，可能文件损坏。详细错误: %s", e)
            raise
        except Exception as e:
            self.logger.error("初始化模型时出错: %s", e)
            raise

    def set_prepare(
        self,
        det_thresh: float = 0.6,
    ):
        """Change the detection threshold, making it the new baseline.

        Args:
            det_thresh: face-detection confidence threshold, default 0.6.
        """
        try:
            self.app.prepare(
                ctx_id=self.ctx_id, det_thresh=det_thresh, det_size=self.det_size
            )
            # BUGFIX: record the new baseline so later temporary overrides
            # (extract_features / extract_features_from_image) restore to it
            # instead of silently reverting to the constructor value.
            self.det_thresh = det_thresh
        except Exception as e:
            self.logger.error("设置模型准备参数时出错: %s", e)

    def _image_process(
        self, image_path: str
    ) -> Tuple[Optional[np.ndarray], Optional[str]]:
        """Load an image from disk as a BGR numpy array.

        ``.webp`` files are decoded through PIL (converted to RGB then to BGR);
        everything else goes through ``cv2.imread``.

        Args:
            image_path: path of the image file.

        Returns:
            ``(img, None)`` on success, ``(None, error_message)`` on failure.
        """
        self.logger.info(f"读取图片: {image_path}")
        img: Optional[np.ndarray] = None
        if image_path.lower().endswith(".webp"):
            try:
                # Lazy import: PIL is only needed for the webp path.
                from PIL import Image

                pil_img = Image.open(image_path)
                # Normalize palette / RGBA / grayscale images to RGB first.
                if pil_img.mode != "RGB":
                    pil_img = pil_img.convert("RGB")
                img = np.array(pil_img)
                # OpenCV works in BGR, so swap channels.
                img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
            except Exception as e:
                self.logger.error(f"读取图片时出错: {e}")
                return None, f"读取webp图片时出错: {e}"
        else:
            img = cv2.imread(image_path)
            # BUGFIX: cv2.imread returns None silently on failure; report it
            # instead of handing the caller (None, None).
            if img is None:
                return None, f"cv2.imread failed to read image: {image_path}"

        return img, None

    def _face_to_dict(self, face_id: int, face: Any) -> Dict[str, Any]:
        """Convert one insightface face object into a plain result dict.

        Missing attributes (kps / embedding / gender / age) map to None.
        """
        bbox = face.bbox.astype(int).tolist()
        landmark = (
            face.kps.astype(float).tolist()
            if hasattr(face, "kps") and face.kps is not None
            else None
        )
        embedding = face.embedding.tolist() if hasattr(face, "embedding") else None
        gender = (
            int(face.gender)
            if hasattr(face, "gender") and face.gender is not None
            else None
        )
        age = (
            int(face.age)
            if hasattr(face, "age") and face.age is not None
            else None
        )
        return {
            "face_id": face_id,
            "bbox": bbox,  # [x1, y1, x2, y2]
            "landmark": landmark,
            "embedding_dimension": len(embedding) if embedding else None,
            "embedding": embedding,
            "gender": gender,
            "age": age,
        }

    def _annotate_faces(self, img: np.ndarray, faces: List[Any]) -> np.ndarray:
        """Return a copy of ``img`` with green boxes and gender/age labels drawn."""
        vis_img = img.copy()
        for face in faces:
            bbox = face.bbox.astype(int)
            cv2.rectangle(
                vis_img, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 255, 0), 2
            )
            # Label only when both attributes were predicted.
            if (
                hasattr(face, "gender")
                and face.gender is not None
                and hasattr(face, "age")
                and face.age is not None
            ):
                gender = "Male" if face.gender == 1 else "Female"
                label = f"{gender}, {int(face.age)}"
                cv2.putText(
                    vis_img,
                    label,
                    (bbox[0], bbox[1] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.8,
                    (0, 255, 0),
                    2,
                )
        return vis_img

    def extract_features(
        self,
        image_path: str,
        output_dir: Optional[str] = None,
        visualize: bool = False,
        thresh: Optional[float] = None,
    ) -> List[FaceFeature]:
        """Extract per-face feature dicts from an image file.

        Args:
            image_path: input image path.
            output_dir: when given, a JSON dump (and the visualization, if any)
                is written there.
            visualize: show an annotated copy of the image in a window.
            thresh: temporary detection threshold for this call only; the
                baseline ``self.det_thresh`` is restored afterwards.

        Returns:
            List of face dicts (bbox, landmark, embedding, gender, age);
            empty list on any failure.
        """
        try:
            img, error = self._image_process(image_path)
            if img is None:
                self.logger.error(f"读取图片{image_path}失败: {error}")
                return []

            self.logger.info(f"检测人脸特征，阈值={thresh}...")

            if thresh is not None:
                # Temporarily override the detection threshold.
                self.app.prepare(
                    ctx_id=self.ctx_id,
                    det_thresh=thresh,
                    det_size=self.det_size,
                )

            faces = self.app.get(img)

            if not faces:
                self.logger.info("未检测到人脸")
                return []

            self.logger.info(f"检测到 {len(faces)} 个人脸")

            results = [self._face_to_dict(i, face) for i, face in enumerate(faces)]

            vis_img = None
            if visualize:
                vis_img = self._annotate_faces(img, faces)
                cv2.imshow("Detected Faces", vis_img)
                cv2.waitKey(0)
                cv2.destroyAllWindows()

            if output_dir:
                os.makedirs(output_dir, exist_ok=True)

                base_name = os.path.splitext(os.path.basename(image_path))[0]
                json_path = os.path.join(output_dir, f"{base_name}_features.json")

                with open(json_path, "w", encoding="utf-8") as f:
                    json.dump(results, f, indent=2)

                self.logger.info(f"特征数据已保存到: {json_path}")

                if visualize and vis_img is not None:
                    vis_path = os.path.join(output_dir, f"{base_name}_visualized.jpg")
                    cv2.imwrite(vis_path, vis_img)
                    self.logger.info(f"可视化结果已保存到: {vis_path}")

            return results
        except Exception:
            self.logger.error(traceback.format_exc())
            return []
        finally:
            # BUGFIX: the original re-prepared with ``det_thresh=thresh`` even
            # when ``thresh`` was None, clobbering the detector's threshold.
            # Restore the baseline instead, and only when it was overridden.
            if thresh is not None:
                self.app.prepare(
                    ctx_id=self.ctx_id,
                    det_thresh=self.det_thresh,
                    det_size=self.det_size,
                )

    def visualize_bboxes(
        self,
        bboxes: List[List[int]],
        width: int = 512,
        height: int = 512,
        line_thickness: int = 2
    ) -> Dict[str, Any]:
        """Draw bounding boxes on a fully transparent RGBA canvas.

        Args:
            bboxes: list of boxes, each ``[x1, y1, x2, y2, score(optional)]``.
            width: output image width in pixels.
            height: output image height in pixels.
            line_thickness: rectangle line width.

        Returns:
            Dict with the base64-encoded PNG (``image``), its format and size,
            and the number of input boxes.

        Raises:
            Exception: re-raises any drawing/encoding error after logging.
        """
        try:
            # Fully transparent RGBA canvas.
            img = np.zeros((height, width, 4), dtype=np.uint8)
            img[:, :, 3] = 0

            # Box color (BGR + alpha): opaque green.
            color = (0, 255, 0, 255)

            for bbox in bboxes:
                # Skip malformed entries rather than failing the whole batch.
                if not bbox or len(bbox) < 4:
                    continue

                x1, y1, x2, y2 = map(int, bbox[:4])

                # Clamp coordinates into the canvas.
                x1, y1 = max(0, x1), max(0, y1)
                x2, y2 = min(width - 1, x2), min(height - 1, y2)

                cv2.rectangle(
                    img,
                    (x1, y1),
                    (x2, y2),
                    color,
                    thickness=line_thickness
                )

                # Optional confidence score rendered above the box.
                if len(bbox) > 4 and isinstance(bbox[4], (int, float)):
                    score = bbox[4]
                    label = f"{score:.2f}"

                    (text_width, text_height), _ = cv2.getTextSize(
                        label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1
                    )

                    # Filled background behind the score text.
                    cv2.rectangle(
                        img,
                        (x1, y1 - text_height - 4),
                        (x1 + text_width, y1),
                        color,
                        -1
                    )

                    cv2.putText(
                        img,
                        label,
                        (x1, y1 - 2),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        0.5,
                        (0, 0, 0, 255),  # black text
                        1,
                        cv2.LINE_AA
                    )

            # Encode as PNG (keeps the alpha channel) and base64-wrap it.
            _, buffer = cv2.imencode('.png', img)
            img_base64 = base64.b64encode(buffer).decode('utf-8')

            return {
                'image': img_base64,
                'format': 'png',
                'width': width,
                'height': height,
                'bbox_count': len(bboxes)
            }

        except Exception as e:
            self.logger.error(f"绘制边界框时出错: {str(e)}")
            raise

    def visualize_landmarks(
        self,
        landmarks_list: List[List[List[float]]],
        width: int = 512,
        height: int = 512,
        point_radius: int = 5,
        show_indices: bool = True
    ) -> Dict[str, Any]:
        """Render one or more sets of face landmarks onto a semi-transparent canvas.

        Args:
            landmarks_list: list of landmark groups, one group per face, each
                group ``[[x1, y1], [x2, y2], ...]``. A single flat group is
                also accepted and wrapped automatically.
            width: output image width in pixels.
            height: output image height in pixels.
            point_radius: drawn point radius.
            show_indices: draw the point index next to each point and a
                ``Face N`` label at each group's first point.

        Returns:
            Dict with the base64-encoded PNG, image size, total drawn point
            count, and the number of landmark groups.

        Raises:
            ValueError: when ``landmarks_list`` is empty or not a list.
            Exception: re-raises any drawing/encoding error after logging.
        """
        try:
            if not isinstance(landmarks_list, list) or not landmarks_list:
                raise ValueError("landmarks_list 必须是非空列表")

            # Accept a single group of points and normalize to a list of groups.
            if isinstance(landmarks_list[0], (int, float)) or \
               (isinstance(landmarks_list[0], list) and len(landmarks_list[0]) == 2 and
                all(isinstance(x, (int, float)) for x in landmarks_list[0])):
                landmarks_list = [landmarks_list]

            # White canvas with 20% alpha so underlying content can show through.
            img = np.ones((height, width, 4), dtype=np.uint8) * 255
            img[:, :, 3] = int(255 * 0.2)

            # Per-face base colors (BGR + alpha), cycled when more faces than colors.
            face_colors = [
                (0, 0, 255, 255),    # red
                (0, 255, 0, 255),    # green
                (255, 0, 0, 255),    # blue
                (0, 255, 255, 255),  # yellow
                (255, 0, 255, 255),  # magenta
                (255, 255, 0, 255),  # cyan
                (0, 165, 255, 255),  # orange
                (128, 0, 128, 255),  # purple
            ]

            # Per-point-type colors (BGR + alpha).
            point_type_colors = [
                (0, 0, 255, 255),    # red - eyes
                (0, 255, 0, 255),    # green - nose
                (255, 0, 0, 255),    # blue - mouth
                (255, 255, 0, 255),  # cyan - face contour
                (255, 0, 255, 255),  # pink - eyebrows
            ]

            total_points = 0

            for face_idx, landmarks in enumerate(landmarks_list):
                # Skip groups that are empty or contain non-(x, y) entries.
                if not landmarks or not all(isinstance(p, (list, tuple)) and len(p) == 2 for p in landmarks):
                    self.logger.warning(f"跳过无效的关键点组 {face_idx}")
                    continue

                face_color = face_colors[face_idx % len(face_colors)]

                # 5-point groups (insightface kps) get one color index per point;
                # otherwise every run of 10 points shares a type color.
                if len(landmarks) == 5:
                    point_groups = [0, 1, 2, 3, 4]
                else:
                    point_groups = [
                        min(i // 10, len(point_type_colors) - 1)
                        for i in range(len(landmarks))
                    ]

                for i, point in enumerate(landmarks):
                    try:
                        x, y = int(float(point[0])), int(float(point[1]))

                        # Only draw points that land inside the canvas.
                        if 0 <= x < width and 0 <= y < height:
                            color_idx = point_groups[i] if i < len(point_groups) else 0
                            point_color = point_type_colors[color_idx]

                            # Blend face color (70%) with the point-type color (30%).
                            mixed_color = [
                                int(face_color[j] * 0.7 + point_color[j] * 0.3)
                                for j in range(4)
                            ]

                            cv2.circle(img, (x, y), point_radius, mixed_color, -1)

                            if show_indices:
                                cv2.putText(
                                    img,
                                    str(i),
                                    (x + point_radius + 2, y + point_radius // 2),
                                    cv2.FONT_HERSHEY_SIMPLEX,
                                    0.4,
                                    (0, 0, 0, 255),  # black text
                                    1,
                                )

                                # Tag the group at its first drawn point.
                                if i == 0:
                                    cv2.putText(
                                        img,
                                        f"Face {face_idx}",
                                        (x - 10, y - 10),
                                        cv2.FONT_HERSHEY_SIMPLEX,
                                        0.5,
                                        face_color,
                                        1,
                                    )

                            total_points += 1

                    except (ValueError, IndexError) as e:
                        self.logger.warning(f"处理关键点时出错: {e}")
                        continue

            # Encode the RGBA canvas as PNG and base64-wrap it.
            _, buffer = cv2.imencode(".png", img)
            img_base64 = base64.b64encode(buffer).decode("utf-8")

            return {
                "image_base64": img_base64,
                "width": width,
                "height": height,
                "total_point_count": total_points,
                "face_count": len(landmarks_list),
            }

        except Exception as e:
            self.logger.error(f"可视化关键点时出错: {str(e)}")
            raise

    def _extract_face_embedding(
        self, image_path: str
    ) -> Tuple[Optional[np.ndarray], Optional[str]]:
        """Return the embedding of the first detected face, or (None, error)."""
        img, error = self._image_process(image_path)
        if img is None:
            return None, f"无法读取图片{image_path}, 错误原因: {error}"

        faces = self.app.get(img)
        if not faces:
            return None, f"未在图片中检测到人脸: {image_path}"

        # Only the first detected face is used.
        return faces[0].embedding, None

    def _extract_multiple_face_embeddings(
        self, image_path: str
    ) -> Tuple[Optional[List[np.ndarray]], Optional[str]]:
        """Return the embeddings of all detected faces, or (None, error)."""
        img, error = self._image_process(image_path)
        if img is None:
            return None, f"无法读取图片{image_path}, 错误原因: {error}"

        faces = self.app.get(img)
        if not faces:
            return None, f"未在图片中检测到人脸: {image_path}"

        return [face.embedding for face in faces], None

    def extract_features_from_image(
        self,
        image_data: np.ndarray,
        visualize: bool = False,
        det_thresh: Optional[float] = None,
    ) -> List[FaceFeature]:
        """Extract per-face feature dicts directly from an in-memory image.

        Args:
            image_data: input image as a numpy array.
                NOTE(review): any 3-channel input is unconditionally converted
                RGB->BGR, so callers passing BGR data get channel-swapped
                detection input — confirm callers' channel order.
            visualize: show an annotated copy of the image in a window.
            det_thresh: temporary detection threshold for this call only; the
                baseline ``self.det_thresh`` is restored afterwards.

        Returns:
            List of face dicts (bbox, landmark, embedding, gender, age);
            empty list on any failure.
        """
        try:
            if not isinstance(image_data, np.ndarray):
                raise ValueError("输入图像数据必须是numpy数组")

            # Assume 3-channel input is RGB and convert to OpenCV's BGR.
            if image_data.ndim == 3 and image_data.shape[2] == 3:
                img = cv2.cvtColor(image_data, cv2.COLOR_RGB2BGR)
            else:
                img = image_data

            self.logger.info(f"从内存图像数据检测人脸并提取特征，阈值{det_thresh}...")

            if det_thresh is not None:
                # Temporarily override the detection threshold.
                self.app.prepare(
                    ctx_id=self.ctx_id,
                    det_thresh=det_thresh,
                    det_size=self.det_size,
                )

            faces = self.app.get(img)

            if not faces:
                self.logger.info("未检测到人脸")
                return []

            self.logger.info(f"检测到 {len(faces)} 个人脸")

            results = [self._face_to_dict(i, face) for i, face in enumerate(faces)]

            if visualize:
                vis_img = self._annotate_faces(img, faces)
                cv2.imshow("Detected Faces", vis_img)
                cv2.waitKey(0)
                cv2.destroyAllWindows()

            return results

        except Exception as e:
            self.logger.error(f"从内存图像提取特征失败: {str(e)}")
            self.logger.error(traceback.format_exc())
            return []
        finally:
            # BUGFIX: the original "restored" with ``self.app.det_thresh`` —
            # an attribute FaceAnalysis is not shown to define, and which would
            # anyway hold the overridden value — and skipped the restore on the
            # no-faces path and on exceptions. Restore the stored baseline in
            # ``finally``, only when a temporary override was applied.
            if det_thresh is not None:
                self.app.prepare(
                    ctx_id=self.ctx_id,
                    det_thresh=self.det_thresh,
                    det_size=self.det_size,
                )
