import os
import cv2
import numpy as np
import json
import shutil
from insightface.app import FaceAnalysis
from sklearn.cluster import DBSCAN
from sklearn.preprocessing import StandardScaler

class FaceClassifier:
    """Two-stage face clustering pipeline.

    Stage 1 (:meth:`extract_features_to_disk`) detects faces in every image
    under ``source_path``, writes each face's embedding (``.npy``) and crop
    (``.jpg``) into ``<output_path>/face_features``, and records them in a
    JSON metadata file.

    Stage 2 (:meth:`cluster_from_disk`) reloads the stored embeddings, runs
    DBSCAN, and copies each face crop into a ``Person_<k>`` (or
    ``unclassified``) directory together with a small result-metadata JSON.
    """

    def __init__(self, source_path, output_path, eps=0.5, min_samples=3):
        """Prepare the face-analysis model and the output directories.

        Args:
            source_path: Directory containing the input images.
            output_path: Directory that will receive features and clusters.
            eps: DBSCAN neighborhood radius (cosine distance).
            min_samples: DBSCAN core-point threshold.
        """
        self.source_path = source_path
        self.output_path = output_path
        self.eps = eps
        self.min_samples = min_samples
        # buffalo_l model pack; ONNX Runtime falls back to CPU when CUDA
        # is unavailable.
        self.app = FaceAnalysis(name='buffalo_l', providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
        self.app.prepare(ctx_id=0, det_size=(640, 640))
        os.makedirs(output_path, exist_ok=True)
        # Directory holding the per-face .npy embeddings and .jpg crops.
        self.feature_dir = os.path.join(output_path, "face_features")
        os.makedirs(self.feature_dir, exist_ok=True)

    def extract_features_to_disk(self):
        """Stage 1: extract face embeddings and crops to disk.

        Returns:
            Path of the JSON metadata file listing every stored face
            (feature path, crop path, original image name, bbox).
        """
        image_exts = ('.jpg', '.jpeg', '.png', '.bmp', '.tif')
        image_files = [f for f in os.listdir(self.source_path)
                       if f.lower().endswith(image_exts)]

        metadata = []  # one entry per successfully stored face

        for img_file in image_files:
            img_path = os.path.join(self.source_path, img_file)
            img = cv2.imread(img_path)
            if img is None:
                # Unreadable / corrupt image file — skip silently.
                continue

            faces = self.app.get(img)

            print(f"processing img: {img_path}, faces count: {len(faces)}")

            if not faces:
                # No face detected: keep the original picture for review.
                self._save_unclassified(img, img_file)
                continue

            height, width = img.shape[:2]
            stem = os.path.splitext(img_file)[0]
            for j, face in enumerate(faces):
                bbox = face.bbox.astype(int)
                # BUG FIX: detector boxes may extend past the image border;
                # negative indices would silently crop the wrong region.
                # Clamp the box to the image before slicing.
                x1, y1 = max(bbox[0], 0), max(bbox[1], 0)
                x2, y2 = min(bbox[2], width), min(bbox[3], height)
                face_img = img[y1:y2, x1:x2]
                # BUG FIX: the original `face_img is not None` check was
                # always true (an ndarray slice is never None). Skip crops
                # that ended up empty instead.
                if face_img.size == 0:
                    continue

                # Unique per-face file names derived from the source image.
                feature_file = f"{stem}_face{j}.npy"
                feature_path = os.path.join(self.feature_dir, feature_file)
                np.save(feature_path, face.embedding)
                face_img_path = os.path.join(self.feature_dir, f"{stem}_face{j}.jpg")
                cv2.imwrite(face_img_path, face_img)

                metadata.append({
                    "feature_path": feature_path,
                    "face_img_path": face_img_path,
                    "original_image": img_file,
                    "bbox": bbox.tolist(),
                })

        # Persist the metadata so stage 2 can run in a separate process.
        metadata_path = os.path.join(self.output_path, "face_metadata.json")
        with open(metadata_path, 'w') as f:
            json.dump(metadata, f)

        print(f"特征提取完成！共处理 {len(metadata)} 个人脸。元数据保存在: {metadata_path}")
        return metadata_path

    def cluster_from_disk(self, metadata_path):
        """Stage 2: load stored embeddings and cluster them with DBSCAN.

        Args:
            metadata_path: JSON file produced by
                :meth:`extract_features_to_disk`.
        """
        with open(metadata_path, 'r') as f:
            metadata = json.load(f)

        if not metadata:
            print("警告：无有效人脸数据")
            return

        # Reload every embedding that still exists on disk; keep the
        # metadata aligned with the feature matrix row-for-row.
        all_features = []
        valid_metadata = []
        for item in metadata:
            feature_path = item["feature_path"]
            if os.path.exists(feature_path):
                all_features.append(np.load(feature_path))
                valid_metadata.append(item)

        X = np.array(all_features)
        if len(X) == 0:
            print("无有效特征数据")
            return

        # NOTE(review): per-feature standardization changes vector
        # directions, which interacts oddly with the cosine metric below —
        # L2-normalizing the embeddings is the more usual choice. Kept
        # as-is because `eps` was tuned against this preprocessing; confirm
        # before changing.
        X = StandardScaler().fit_transform(X)

        # DBSCAN with cosine distance; label -1 marks noise/outliers.
        dbscan = DBSCAN(eps=self.eps, min_samples=self.min_samples, metric='cosine')
        cluster_labels = dbscan.fit_predict(X)

        # Materialize the clusters: one directory per person, plus an
        # "unclassified" directory for noise points.
        for label, meta in zip(cluster_labels, valid_metadata):
            cluster_id = f"Person_{label + 1}" if label != -1 else "unclassified"
            cluster_dir = os.path.join(self.output_path, cluster_id)
            os.makedirs(cluster_dir, exist_ok=True)

            # Copy the face crop into its cluster directory.
            face_img_path = meta["face_img_path"]
            face_img_name = os.path.basename(face_img_path)
            shutil.copy(face_img_path, os.path.join(cluster_dir, face_img_name))

            # Sidecar JSON describing where this face came from.
            result_meta = {
                "cluster_id": int(label),
                "original_image": meta["original_image"],
                "bbox": meta["bbox"],
                "feature_path": meta["feature_path"],
            }
            with open(os.path.join(cluster_dir, f"{os.path.splitext(face_img_name)[0]}_meta.json"), 'w') as f:
                json.dump(result_meta, f)

        # Summary: number of clusters excludes the noise label (-1).
        n_clusters = len(set(cluster_labels)) - (1 if -1 in cluster_labels else 0)
        print(f"聚类完成！共识别出 {n_clusters} 个不同人物，{sum(cluster_labels == -1)} 个未分类人脸")

    def _save_unclassified(self, img, original_filename):
        """Save an image in which no face was detected, for manual review."""
        unclassified_dir = os.path.join(self.output_path, "unclassified")
        os.makedirs(unclassified_dir, exist_ok=True)
        cv2.imwrite(os.path.join(unclassified_dir, original_filename), img)

if __name__ == "__main__":
    # Input/output locations — adjust these for your environment.
    src_dir = "input_images"
    out_dir = "output_for_2step"

    # Build the classifier with the same DBSCAN settings as before.
    clf = FaceClassifier(
        source_path=src_dir,
        output_path=out_dir,
        eps=0.65,
        min_samples=2,
    )

    # Stage 1: detect faces and persist embeddings + crops to disk.
    print("阶段1：提取特征到硬盘")
    meta_file = clf.extract_features_to_disk()

    # Stage 2: reload the stored embeddings and cluster them.
    print("阶段2：从硬盘加载特征进行聚类")
    if meta_file:
        clf.cluster_from_disk(meta_file)