"""
Simplified DINOv2 keypoint extraction.
Uses the lightest distilled DINOv2 model (dinov2_vits14, ~21 MB download).
"""

import numpy as np
import torch
import cv2
from torch.nn.functional import interpolate
from kmeans_pytorch import kmeans
from sklearn.cluster import MeanShift
import yaml
from pathlib import Path


class KeypointProposer:
    """Propose candidate keypoints on an RGB image using DINOv2 patch features.

    Pipeline: dense DINOv2 features -> PCA down to 3 dims -> k-means over the
    combined (feature, pixel-position) space -> one keypoint per cluster ->
    mean-shift merge of keypoints that are too close together.
    """

    def __init__(self, config):
        """Build the proposer from a parsed config dict.

        Args:
            config: dict with 'keypoint_proposer' (sub-config), 'device'
                (torch device string) and 'bounds' (min/max arrays) entries,
                e.g. loaded from config.yaml.
        """
        self.config = config['keypoint_proposer']
        self.device = torch.device(config['device'])
        self.bounds_min = np.array(config['bounds']['min'])
        self.bounds_max = np.array(config['bounds']['max'])

        # Load the lightest distilled DINOv2 model (~21 MB). Weights are
        # downloaded automatically by torch.hub on first run to
        # ~/.cache/torch/hub/checkpoints/.
        print("正在加载 DINOv2 模型...")
        print("首次运行需要下载模型（约21MB），请耐心等待...")
        self.dinov2 = torch.hub.load('facebookresearch/dinov2', 'dinov2_vits14').eval().to(self.device)
        print("DINOv2 模型加载完成")

        # Mean-shift clusterer used to merge keypoints that are closer than
        # the configured minimum pixel distance.
        self.mean_shift = MeanShift(
            bandwidth=self.config['min_dist_bt_keypoints'],
            bin_seeding=True,
            n_jobs=-1
        )

        self.patch_size = self.config['patch_size']

        # Seed all RNGs so clustering results are reproducible.
        np.random.seed(self.config['seed'])
        torch.manual_seed(self.config['seed'])
        if torch.cuda.is_available():
            torch.cuda.manual_seed(self.config['seed'])

    def get_keypoints(self, rgb_image, mask=None):
        """Extract keypoints from an RGB image.

        Args:
            rgb_image: np.ndarray, shape (H, W, 3), uint8 0-255, RGB order.
            mask: optional np.ndarray (H, W) binary mask at the ORIGINAL
                image resolution (background 0, foreground non-zero).

        Returns:
            keypoints: np.ndarray (N, 2) of (row, col) positions in the
                original image coordinate frame.
            projected_img: copy of rgb_image with keypoints drawn on it.
        """
        orig_h, orig_w = rgb_image.shape[:2]

        # Preprocess (possibly downscale + patch-align) the image.
        transformed_rgb, shape_info = self._preprocess(rgb_image)

        # Dense per-pixel DINOv2 features at the preprocessed resolution.
        features_flat = self._get_features(transformed_rgb, shape_info)

        # Without a mask, use the whole image as foreground.
        if mask is None:
            mask = np.ones((orig_h, orig_w), dtype=bool)
        else:
            mask = mask.astype(bool)

        # Skip masks that cover too much of the image.
        if np.mean(mask) > self.config['max_mask_ratio']:
            print(f"警告: mask占比过大 ({np.mean(mask):.2f}), 跳过")
            return np.array([]), rgb_image.copy()

        # BUG FIX: features are computed on the (possibly downscaled,
        # patch-aligned) preprocessed image, so the mask must be resampled
        # to that resolution before it can index the flattened feature grid.
        proc_h, proc_w = shape_info['img_h'], shape_info['img_w']
        if mask.shape != (proc_h, proc_w):
            mask = cv2.resize(
                mask.astype(np.uint8), (proc_w, proc_h),
                interpolation=cv2.INTER_NEAREST
            ).astype(bool)

        # Cluster features inside the mask into candidate keypoints.
        keypoints, keypoint_pixels = self._extract_keypoints_from_mask(
            features_flat, mask, shape_info
        )

        if len(keypoints) == 0:
            return np.array([]), rgb_image.copy()

        # Merge keypoints that are too close to each other.
        if len(keypoints) > 1:
            merged_indices = self._merge_clusters(keypoints)
            keypoints = keypoints[merged_indices]
            keypoint_pixels = keypoint_pixels[merged_indices]

        # BUG FIX: map keypoints from the preprocessed resolution back to
        # the original image so the returned coordinates and the drawn
        # overlay match the input image.
        scale = np.array([orig_h / proc_h, orig_w / proc_w])
        keypoints = np.round(keypoints * scale).astype(int)
        keypoint_pixels = np.round(keypoint_pixels * scale).astype(int)

        # Draw the keypoints on a copy of the original image.
        projected_img = self._project_keypoints_to_img(rgb_image, keypoint_pixels)

        return keypoints, projected_img

    def _preprocess(self, rgb_image):
        """Resize the image for efficient, patch-aligned feature extraction.

        Steps:
        1. If the image is large (phone photos are often 4000x3000), scale
           it down so the longest side is at most 720 px.
        2. Crop-resize so both sides are divisible by patch_size (14),
           as required by DINOv2 patch tokenization.
        3. Normalize pixel values to [0, 1].

        Args:
            rgb_image: np.ndarray (H, W, 3), uint8 0-255.

        Returns:
            transformed_rgb: np.ndarray (new_H, new_W, 3), float32 in [0, 1].
            shape_info: dict with processed image and patch-grid dimensions.
        """
        H, W, _ = rgb_image.shape

        # Limit the longest side to keep compute and memory bounded.
        max_dimension = 720
        if max(H, W) > max_dimension:
            scale = max_dimension / max(H, W)
            new_H = int(H * scale)
            new_W = int(W * scale)
            rgb_image = cv2.resize(rgb_image, (new_W, new_H))
            H, W = new_H, new_W

        # Make both sides divisible by patch_size (DINOv2 requirement).
        patch_h = H // self.patch_size
        patch_w = W // self.patch_size
        new_H = patch_h * self.patch_size
        new_W = patch_w * self.patch_size

        if H != new_H or W != new_W:
            transformed_rgb = cv2.resize(rgb_image, (new_W, new_H))
        else:
            transformed_rgb = rgb_image.copy()

        # Normalize to [0, 1].
        transformed_rgb = transformed_rgb.astype(np.float32) / 255.0

        # NOTE: img_h/img_w are the patch-aligned processed dimensions
        # (new_H/new_W may differ from H/W by at most patch_size - 1).
        shape_info = {
            'img_h': new_H,
            'img_w': new_W,
            'patch_h': patch_h,
            'patch_w': patch_w,
        }

        return transformed_rgb, shape_info

    @torch.inference_mode()
    def _get_features(self, transformed_rgb, shape_info):
        """Extract dense DINOv2 features, upsampled to per-pixel resolution.

        Args:
            transformed_rgb: np.ndarray (img_h, img_w, 3), float32 in [0, 1].
            shape_info: dict from _preprocess.

        Returns:
            torch.Tensor (img_h * img_w, feature_dim) on self.device.
        """
        img_h = shape_info['img_h']
        img_w = shape_info['img_w']
        patch_h = shape_info['patch_h']
        patch_w = shape_info['patch_w']

        # HWC numpy -> NCHW tensor on the model device.
        img_tensor = torch.from_numpy(transformed_rgb).permute(2, 0, 1).unsqueeze(0).to(self.device)

        # Per-patch tokens: [1, patch_h*patch_w, feature_dim].
        features_dict = self.dinov2.forward_features(img_tensor)
        raw_feature_grid = features_dict['x_norm_patchtokens']

        raw_feature_grid = raw_feature_grid.reshape(1, patch_h, patch_w, -1)

        # Bilinearly upsample the patch grid to full image resolution.
        interpolated_feature_grid = interpolate(
            raw_feature_grid.permute(0, 3, 1, 2),
            size=(img_h, img_w),
            mode='bilinear'
        ).permute(0, 2, 3, 1).squeeze(0)

        features_flat = interpolated_feature_grid.reshape(-1, interpolated_feature_grid.shape[-1])

        return features_flat

    def _extract_keypoints_from_mask(self, features_flat, mask, shape_info):
        """Cluster masked-region features and pick one keypoint per cluster.

        Args:
            features_flat: torch.Tensor (img_h*img_w, feat_dim) on self.device.
            mask: np.ndarray (img_h, img_w) bool, at preprocessed resolution.
            shape_info: dict from _preprocess.

        Returns:
            (keypoints, keypoint_pixels): two identical (N, 2) int arrays of
            (row, col) positions in preprocessed-image coordinates.
        """
        # BUG FIX: index with a torch bool tensor on the feature device; a
        # CPU numpy mask cannot reliably index a CUDA tensor.
        mask_flat = torch.from_numpy(mask.reshape(-1)).to(features_flat.device)
        obj_features = features_flat[mask_flat]

        if len(obj_features) == 0:
            return np.array([]), np.array([])

        # Pixel coordinates of the mask region, (N, 2) as (row, col).
        mask_pixels = np.argwhere(mask)

        # PCA down to 3 dims to make clustering cheap and stable.
        obj_features = obj_features.double()
        (u, s, v) = torch.pca_lowrank(obj_features, center=False)
        features_pca = torch.mm(obj_features, v[:, :3])

        # Min-max normalize each PCA dim to [0, 1].
        features_pca = (features_pca - features_pca.min(0)[0]) / (features_pca.max(0)[0] - features_pca.min(0)[0] + 1e-8)

        # Normalized pixel coordinates as extra clustering dims, so clusters
        # are spatially coherent as well as feature-coherent.
        mask_pixels_torch = torch.tensor(mask_pixels, dtype=features_pca.dtype, device=features_pca.device)
        mask_pixels_torch = (mask_pixels_torch - mask_pixels_torch.min(0)[0]) / (mask_pixels_torch.max(0)[0] - mask_pixels_torch.min(0)[0] + 1e-8)

        # Combined (feature, position) clustering space.
        X = torch.cat([features_pca, mask_pixels_torch], dim=-1)

        # K-means clustering into the configured number of candidates.
        cluster_ids_x, cluster_centers = kmeans(
            X=X,
            num_clusters=self.config['num_candidates_per_mask'],
            distance='euclidean',
            device=self.device,
        )
        # BUG FIX: kmeans_pytorch returns its results on CPU; move them to
        # the feature tensor's device/dtype before indexing and distance
        # computation, otherwise mixed-device ops fail on CUDA.
        cluster_ids_x = cluster_ids_x.to(features_pca.device)
        cluster_centers = cluster_centers.to(features_pca)

        keypoints = []
        keypoint_pixels = []

        for cluster_id in range(self.config['num_candidates_per_mask']):
            # Compare only in the 3 PCA-feature dims (drop the 2 pixel dims).
            cluster_center = cluster_centers[cluster_id][:3]
            member_idx = cluster_ids_x == cluster_id

            # Skip clusters that ended up empty.
            if member_idx.sum() == 0:
                continue

            member_features = features_pca[member_idx]
            # BUG FIX: numpy arrays must be indexed with a CPU numpy mask,
            # not a (possibly CUDA) torch tensor.
            member_pixels = mask_pixels[member_idx.cpu().numpy()]

            # Pick the member whose features are closest to the center.
            dist = torch.norm(member_features - cluster_center, dim=-1)
            closest_idx = int(torch.argmin(dist).item())

            keypoint_pixels.append(member_pixels[closest_idx])
            keypoints.append(member_pixels[closest_idx])

        return np.array(keypoints), np.array(keypoint_pixels)

    def _merge_clusters(self, keypoints):
        """Merge keypoints that are closer than the configured minimum distance.

        Runs mean-shift (bandwidth = min_dist_bt_keypoints) on the pixel
        positions and keeps, for each resulting cluster center, the index of
        the closest original keypoint.

        Args:
            keypoints: np.ndarray (N, 2) of (row, col) positions.

        Returns:
            list of indices into `keypoints`, one per merged cluster.
        """
        if len(keypoints) <= 1:
            return list(range(len(keypoints)))

        self.mean_shift.fit(keypoints)
        cluster_centers = self.mean_shift.cluster_centers_

        merged_indices = []
        for center in cluster_centers:
            dist = np.linalg.norm(keypoints - center, axis=-1)
            merged_indices.append(np.argmin(dist))

        return merged_indices

    def _project_keypoints_to_img(self, rgb_image, keypoint_pixels):
        """Draw numbered keypoint markers onto a copy of the image.

        Args:
            rgb_image: np.ndarray (H, W, 3) original image.
            keypoint_pixels: iterable of (row, col) positions.

        Returns:
            Annotated copy of rgb_image.
        """
        projected = rgb_image.copy()

        for i, pixel in enumerate(keypoint_pixels):
            row, col = int(pixel[0]), int(pixel[1])

            # Filled circle marker with a black outline.
            cv2.circle(projected, (col, row), 15, (0, 255, 0), -1)
            cv2.circle(projected, (col, row), 15, (0, 0, 0), 2)

            # Index label centered on the marker.
            text = str(i)
            text_size = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, 0.6, 2)[0]
            text_x = col - text_size[0] // 2
            text_y = row + text_size[1] // 2
            cv2.putText(projected, text, (text_x, text_y), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)

        return projected


def load_config(config_path='config.yaml'):
    """Read a YAML configuration file and return it as a dict."""
    config_text = Path(config_path).read_text(encoding='utf-8')
    return yaml.safe_load(config_text)


if __name__ == '__main__':
    # Smoke test: load config, build the proposer, run it on one image.
    config = load_config()
    proposer = KeypointProposer(config)

    test_image_path = Path('input_images/test.jpg')
    if test_image_path.exists():
        # BUG FIX: cv2.imread returns BGR, but get_keypoints documents an
        # RGB input — convert before passing it in.
        bgr = cv2.imread(str(test_image_path))
        rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
        keypoints, projected = proposer.get_keypoints(rgb)
        print(f"提取了 {len(keypoints)} 个关键点")
        # BUG FIX: cv2.imwrite fails silently when the directory is missing;
        # also convert the annotated RGB image back to BGR for writing.
        Path('outputs').mkdir(parents=True, exist_ok=True)
        cv2.imwrite('outputs/result.jpg', cv2.cvtColor(projected, cv2.COLOR_RGB2BGR))
    else:
        print("请将测试图像放在 input_images/test.jpg")

