import cv2
import numpy as np
from collections import deque
import math
import joblib  # 用于加载 .pkl 的 SVR 模型
import os
import joblib
from .infer import return_center_map
from .model import DenseNet2D
import torch
import json

class EyeFeatureExtractor:
    """Extract per-eye gaze features: pupil center and glint-ellipse center.

    Pipeline: threshold the image to find bright glints, segment the pupil/iris
    with a DenseNet2D model, keep only the glints that fall inside the
    segmented region, then fit an ellipse to those glints with a RANSAC-style
    5-point search.
    """

    def __init__(self, model_path, device, light_threshold=150, eccentricity_threshold=0.7, ransac_threshold=1.5):
        """
        Args:
            model_path: path to the DenseNet2D state dict (.pth).
            device: torch device used for inference.
            light_threshold: grayscale threshold for glint (bright spot) detection.
            eccentricity_threshold: reject candidate ellipses whose
                eccentricity is >= this value.
            ransac_threshold: max point-to-ellipse distance (px) for an inlier.
        """
        self.model = DenseNet2D()
        self.device = device
        state_dict = torch.load(model_path, map_location=self.device)
        # Drop the ellipse-regression head weights; only segmentation is used.
        filtered_state_dict = {k: v for k, v in state_dict.items() if not k.startswith('elReg')}
        self.model.load_state_dict(filtered_state_dict)
        self.model.to(self.device)
        self.model.eval()

        self.light_threshold = light_threshold
        self.eccentricity_threshold = eccentricity_threshold
        self.ransac_threshold = ransac_threshold

    def detect_glints(self, img_path):
        """Threshold the image and return (list of (x, y) glint centers, BGR image).

        Raises:
            FileNotFoundError: if the image cannot be read.
        """
        img = cv2.imread(img_path)
        if img is None:
            raise FileNotFoundError(f"无法读取图像: {img_path}")
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        _, binary = cv2.threshold(gray, self.light_threshold, 255, cv2.THRESH_BINARY)
        glints = self._find_islands(binary)
        return glints, img

    def _find_islands(self, grid, min_area=1):
        """Return (x, y) centroids of 8-connected bright blobs with area >= min_area."""
        # Ensure a uint8 numpy array for OpenCV.
        grid = np.array(grid, dtype=np.uint8)

        # Connected-component analysis (8-connectivity).
        num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(grid, connectivity=8)

        islands = []
        for i in range(1, num_labels):  # label 0 is the background
            if stats[i, cv2.CC_STAT_AREA] >= min_area:
                cx, cy = centroids[i]  # OpenCV centroids are (x, y)
                islands.append((int(cx), int(cy)))
        return islands

    def get_pupil_center_and_filtered_glints(self, img_path, glints):
        """Segment the eye and return (pupil_center, glints inside pupil/iris).

        Returns (None, None) when the pupil cannot be located.
        """
        pupil_center, seg_map = self._return_center_map(self.model, img_path, self.device)
        if pupil_center is None:
            return None, None
        filtered_glints, _ = self._filter_coordinates_by_map(glints, seg_map)
        return pupil_center, filtered_glints

    def _return_center_map(self, model, img_path, device):
        """Run model inference for one image.

        Returns:
            pupil_center: (x, y) or None.
            seg_map: HxW label map (0: background, 1: pupil, 2: iris, ...).
        """
        return return_center_map(model, img_path, device)

    def _filter_coordinates_by_map(self, coordinates, map_img):
        """Keep (x, y) points that land on pupil/iris labels (1 or 2).

        Returns:
            (filtered, segmap): an (N, 2) array of kept [x, y] points and the
            binary uint8 mask (255 where label is 1 or 2) used for filtering.
        """
        H, W = map_img.shape
        segmap = np.zeros((H, W), dtype=np.uint8)
        segmap[(map_img == 1) | (map_img == 2)] = 255
        filtered = [[x, y] for x, y in coordinates
                    if 0 <= x < W and 0 <= y < H and segmap[y, x] == 255]
        return np.array(filtered), segmap

    def rotate_points(self, points, angle_degrees, center):
        """Rotate (N, 2) points by angle_degrees (counter-clockwise) about center."""
        angle_rad = np.deg2rad(angle_degrees)
        c, s = np.cos(angle_rad), np.sin(angle_rad)
        R = np.array([[c, -s], [s, c]])
        return (R @ (points - center).T).T + center

    def fit_ellipse_ransac(self, points, num_iterations=500, threshold=None):
        """Fit an ellipse to 2D points via a RANSAC-style 5-point search.

        Args:
            points: iterable of (x, y) points.
            num_iterations: cap on the number of 5-point samples tried; when
                the number of 5-combinations is below the cap, all are tried
                exhaustively (previous behavior), otherwise a random subset
                is sampled to avoid combinatorial blow-up.
            threshold: inlier distance in pixels; defaults to
                self.ransac_threshold when None.

        Returns:
            (best_ellipse, inlier_points, best_eccentricity), each None when
            fewer than 5 points are given or no acceptable fit is found.
        """
        import itertools

        points = np.asarray(points)
        n_points = len(points)
        if n_points < 5:
            return None, None, None

        if threshold is None:
            threshold = self.ransac_threshold

        all_combos = list(itertools.combinations(range(n_points), 5))
        if len(all_combos) > num_iterations:
            # Too many combinations to enumerate: sample without replacement.
            rng = np.random.default_rng()
            chosen = rng.choice(len(all_combos), size=num_iterations, replace=False)
            combos = [all_combos[i] for i in chosen]
        else:
            combos = all_combos

        best_model = None
        best_inliers = None
        best_ecc = None  # eccentricity of the best model (was previously the last candidate's)
        max_inliers = 0
        min_error = float('inf')

        for indices_tuple in combos:
            sample_points = points[np.array(indices_tuple, dtype=int)]
            try:
                ellipse = cv2.fitEllipse(sample_points.astype(np.float32))
                center, axes, angle = ellipse

                # cv2.fitEllipse reports full axis lengths; halve once for semi-axes.
                a = max(axes) / 2.0  # semi-major axis
                b = min(axes) / 2.0  # semi-minor axis
                if a == 0:
                    continue
                ecc = math.sqrt(1 - (b ** 2 / a ** 2))
                if ecc >= self.eccentricity_threshold:
                    continue  # too elongated to be a plausible fit

                # Rotate the points by -angle so distances can be measured
                # against an axis-aligned polygonal approximation of the ellipse.
                rotated_points = self.rotate_points(points, angle_degrees=-angle, center=center)
                ellipse_contour = cv2.ellipse2Poly(
                    center=(int(center[0]), int(center[1])),
                    axes=(int(axes[0] / 2), int(axes[1] / 2)),  # ellipse2Poly wants semi-axes
                    angle=0,
                    arcStart=0,
                    arcEnd=360,
                    delta=1)

                distances = np.array([
                    abs(cv2.pointPolygonTest(ellipse_contour, (int(p[0]), int(p[1])), measureDist=True))
                    for p in rotated_points])
                inlier_mask = distances < threshold
                inliers = points[inlier_mask]
                # Mean inlier error; inf when there are no inliers (avoids nan).
                mean_error = float(np.mean(distances[inlier_mask])) if inlier_mask.any() else float('inf')

                # Prefer more inliers; break ties by lower mean inlier error.
                if len(inliers) > max_inliers or (len(inliers) == max_inliers and mean_error < min_error):
                    max_inliers = len(inliers)
                    min_error = mean_error
                    best_model = ellipse
                    best_inliers = inliers
                    best_ecc = ecc

            except Exception:
                continue  # degenerate 5-point sample or invalid geometry; skip

        return best_model, best_inliers, best_ecc

    def extract_features(self, img_path):
        """Extract the per-eye feature pair for one image.

        Returns:
            (pupil_center, ellipse_center) as np.array (x, y) pairs, or
            (None, None) on failure (no glints, no pupil, fewer than 5
            filtered glints, or no acceptable ellipse fit).
        """
        glints, _ = self.detect_glints(img_path)
        if not glints:
            return None, None

        pupil_center, filtered_glints = self.get_pupil_center_and_filtered_glints(img_path, glints)
        if pupil_center is None or len(filtered_glints) < 5:
            return None, None

        ellipse, _, _ = self.fit_ellipse_ransac(filtered_glints)
        if ellipse is None:
            return None, None

        return np.array(pupil_center), np.array(ellipse[0])
    



class BinocularSVRTracker:
    """
    Binocular SVR gaze-point predictor (inference only, with normalization).

    Three regressors are held:
        - 'L':   left eye only
        - 'R':   right eye only
        - 'two': both eyes jointly
    Each regressor has a matching StandardScaler applied before prediction.
    """

    def __init__(self, feature_extractor: EyeFeatureExtractor, model_paths, scaler_paths):
        self.feature_extractor = feature_extractor

        eyes = ('L', 'R', 'two')

        # Validate every path up front so failures surface at construction time.
        for key in eyes:
            if not os.path.isfile(model_paths[key]):
                raise FileNotFoundError(f"{key} 模型不存在: {model_paths[key]}")
            if not os.path.isfile(scaler_paths[key]):
                raise FileNotFoundError(f"{key} scaler 不存在: {scaler_paths[key]}")

        # Load all regressors first, then all scalers.
        self.models = {}
        for key in eyes:
            self.models[key] = joblib.load(model_paths[key])
        self.scalers = {}
        for key in eyes:
            self.scalers[key] = joblib.load(scaler_paths[key])
        print("✅ 模型与归一化器加载完成：", list(self.models.keys()))

    def predict_from_images(self, img_L_path=None, img_R_path=None):
        """
        Predict the gaze point from left/right eye image paths, mirroring
        EyeTrainer.evaluate_real: use the joint model when both eyes yield
        features, otherwise fall back to the single valid eye, else None.
        """
        pupil_L = ellipse_L = pupil_R = ellipse_R = None

        if img_L_path:
            pupil_L, ellipse_L = self.feature_extractor.extract_features(img_L_path)
        if img_R_path:
            pupil_R, ellipse_R = self.feature_extractor.extract_features(img_R_path)

        have_left = pupil_L is not None and ellipse_L is not None
        have_right = pupil_R is not None and ellipse_R is not None

        # Both eyes valid: 10-dim joint feature vector.
        if have_left and have_right:
            ml, nl = (pupil_L - ellipse_L)[:2]
            mr, nr = (pupil_R - ellipse_R)[:2]
            raw = [[ml, nl, mr, nr, ml**2, nl**2, mr**2, nr**2, ml*nl, mr*nr]]
            return self.models['two'].predict(self.scalers['two'].transform(raw))[0]

        # Single-eye fallback: 5-dim feature vector [m, n, m^2, n^2, m*n].
        if have_left:
            key = 'L'
            m, n = (pupil_L - ellipse_L)[:2]
        elif have_right:
            key = 'R'
            m, n = (pupil_R - ellipse_R)[:2]
        else:
            print("⚠️ 左右眼均无法预测")
            return None

        raw = [[m, n, m*m, n*n, m*n]]
        return self.models[key].predict(self.scalers[key].transform(raw))[0]

def compute_angle_errors(vectors_a, vectors_b, H=2, degrees=True):
    """Angular error between two sets of 2D gaze points lifted to 3D.

    Each 2D point is extended with a third coordinate H (a fixed depth),
    and the angle between corresponding 3D direction vectors is returned.

    Args:
        vectors_a, vectors_b: (N, 2) arrays (or nested lists) of 2D points.
        H: depth value appended as the third coordinate.
        degrees: return degrees when True, radians otherwise.

    Returns:
        (N,) array of angles.

    Raises:
        ValueError: if the two inputs have different shapes.
    """
    a = np.array(vectors_a)
    b = np.array(vectors_b)
    if a.shape != b.shape:
        raise ValueError("两组向量的形状必须相同")

    depth = np.full((a.shape[0], 1), H)
    a3 = np.concatenate([a, depth], axis=1)
    b3 = np.concatenate([b, depth], axis=1)

    a3 = a3 / np.linalg.norm(a3, axis=1, keepdims=True)
    b3 = b3 / np.linalg.norm(b3, axis=1, keepdims=True)

    cos_sim = np.clip((a3 * b3).sum(axis=1), -1.0, 1.0)
    radians = np.arccos(cos_sim)
    return np.degrees(radians) if degrees else radians



def test_folder(folder_path, tracker: BinocularSVRTracker):
    """Evaluate the tracker on an annotated folder and print the mean angular error.

    Expects `annotations.json` in folder_path: a list of items with keys
    "left_img", "right_img" (relative image paths) and "label" (the true
    gaze point). Images that fail to predict contribute None to the list
    and are excluded from the mean.
    """
    with open(os.path.join(folder_path, 'annotations.json'), 'r') as f:
        labels = json.load(f)

    error_list = []
    for item in labels:
        left_path = os.path.join(folder_path, item["left_img"])
        right_path = os.path.join(folder_path, item["right_img"])
        gaze = tracker.predict_from_images(img_L_path=left_path, img_R_path=right_path)
        if gaze is None:
            error_list.append(None)
        else:
            error_list.append(compute_angle_errors([gaze], [item["label"]])[0])

    valid_errors = [e for e in error_list if e is not None]
    print("\n所有图像平均误差为: ", np.nanmean(valid_errors))


if __name__ == "__main__":
    # Smoke-test entry point: load the segmentation model and run feature
    # extraction on a single hard-coded sample image.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    feature_extractor = EyeFeatureExtractor(
        model_path="/home/wtpan/wcmx/model_dict/newglass_test_7.pth",
        device=device
    )
    # Full evaluation flow (kept for reference): build a BinocularSVRTracker
    # from per-subject regressors/scalers and evaluate an annotated folder.
    # idx=1
    # Test_root = "/home/wtpan/wcmx/data/finaltestdatanew/"+str(idx)
    # model_paths = {
    #     'L': Test_root+"/model/L_regressor.pkl",
    #     'R': Test_root+"/model/R_regressor.pkl",
    #     'two': Test_root+"/model/two_regressor.pkl"
    # }

    # scaler_paths = {
    #     'L': Test_root+"/model/L_scaler.pkl",
    #     'R': Test_root+"/model/R_scaler.pkl",
    #     'two': Test_root+"/model/two_scaler.pkl"
    # }
    # tracker = BinocularSVRTracker(feature_extractor, model_paths, scaler_paths)
    # test_folder(Test_root+"/samples", tracker)
    img_path = "/home/wtpan/wcmx/data/EyeCameraI/eyedatalogger2025-08-24-23-28-23/camera_R_0/7136495044391.bmp"
    # NOTE(review): extract_features returns (pupil_center, ellipse_center),
    # so "features"/"raw" here are actually the two centers, and only the
    # ellipse center is printed — consider renaming these variables.
    features, raw = feature_extractor.extract_features(img_path)
    print("提取的特征:", raw)

    # # Example gaze prediction (kept for reference).
    # gaze = tracker.predict(
    #     left_img_path="test_images/left_eye.jpg",
    #     right_img_path="test_images/right_eye.jpg"
    # )
    # if gaze is not None:
    #     print(f"预测注视点: {gaze}")