import cv2
import numpy as np
import matplotlib.pyplot as plt
from scipy import spatial, stats
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
import json
import os
from pathlib import Path


class GardenSimilarityAnalyzer:
    """Compare a garden plan image against a set of reference plan images.

    Pipeline per image: binarize -> extract outline (contour) features ->
    detect internal structures -> summarize their spatial distribution ->
    derive global layout metrics. The concatenated features are z-scored
    with a StandardScaler fitted on the reference set, then compared with
    a weighted blend of cosine and Euclidean similarity.
    """

    def __init__(self):
        # Feature dicts extracted from the reference images by train().
        self.reference_features = []
        # Fitted on the reference feature vectors; normalizes every vector
        # before any distance computation.
        self.scaler = StandardScaler()

    def process_image(self, image_path):
        """Load an image and binarize it for contour analysis.

        Args:
            image_path: path to a plan image readable by OpenCV.

        Returns:
            (img, gray, cleaned): original BGR image, grayscale version,
            and a denoised inverse-binary mask (dark drawing -> white).

        Raises:
            ValueError: if the image cannot be read.
        """
        img = cv2.imread(image_path)
        if img is None:
            raise ValueError(f"无法读取图像: {image_path}")

        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Inverse binary threshold: pixels darker than 200 (the drawing)
        # become foreground (255) on a black background.
        _, thresh = cv2.threshold(gray, 200, 255, cv2.THRESH_BINARY_INV)

        # Morphological opening with a 2x2 kernel removes speckle noise.
        kernel = np.ones((2, 2), np.uint8)
        cleaned = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)

        return img, gray, cleaned

    def extract_contour_features(self, cleaned_img):
        """Describe the shape of the largest external contour in the mask.

        Returns:
            Dict of scalar shape descriptors plus the 7 Hu moments, or
            None when the mask contains no contours.
        """
        contours, _ = cv2.findContours(cleaned_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        if not contours:
            return None

        # Assume the largest external contour is the garden outline.
        main_contour = max(contours, key=cv2.contourArea)

        area = cv2.contourArea(main_contour)
        perimeter = cv2.arcLength(main_contour, True)

        # Compactness (circularity): 1.0 for a perfect circle, lower for
        # more convoluted outlines.
        compactness = 4 * np.pi * area / (perimeter ** 2) if perimeter > 0 else 0

        # Fraction of the minimum-area bounding rectangle the outline fills.
        rect = cv2.minAreaRect(main_contour)
        box = cv2.boxPoints(rect)
        # np.intp replaces np.int0, which was removed in NumPy 2.0.
        box_area = cv2.contourArea(box.astype(np.intp))
        fill_ratio = area / box_area if box_area > 0 else 0

        # Hu moments: 7 translation/rotation/scale-invariant descriptors.
        moments = cv2.moments(main_contour)
        hu_moments = cv2.HuMoments(moments).flatten()

        # Elongation from an ellipse fit (fitEllipse needs >= 5 points).
        if len(main_contour) >= 5:
            _center, axes, _angle = cv2.fitEllipse(main_contour)
            elongation = max(axes) / min(axes) if min(axes) > 0 else 0
        else:
            elongation = 0

        return {
            "area": float(area),
            "perimeter": float(perimeter),
            "compactness": float(compactness),
            "fill_ratio": float(fill_ratio),
            "elongation": float(elongation),
            "hu_moments": [float(m) for m in hu_moments],
        }

    def detect_structures(self, cleaned_img, min_area=50):
        """Detect internal structures (buildings) in the binary mask.

        Args:
            cleaned_img: binary mask from process_image().
            min_area: contours with area <= min_area are discarded as noise.

        Returns:
            (centers, areas, contours): centroid (x, y) tuples, matching
            contour areas, and the filtered contours themselves.
        """
        # RETR_CCOMP also returns inner contours, i.e. candidate structures.
        contours, _ = cv2.findContours(cleaned_img, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)

        # Drop contours that are too small to be structures.
        structure_contours = [cnt for cnt in contours if cv2.contourArea(cnt) > min_area]

        structure_centers = []
        structure_areas = []

        for cnt in structure_contours:
            M = cv2.moments(cnt)
            # m00 == 0 means a degenerate contour with no defined centroid.
            if M["m00"] != 0:
                cx = int(M["m10"] / M["m00"])
                cy = int(M["m01"] / M["m00"])
                area = cv2.contourArea(cnt)
                structure_centers.append((cx, cy))
                structure_areas.append(area)

        return structure_centers, structure_areas, structure_contours

    def analyze_spatial_distribution(self, structure_centers, structure_areas, img_shape):
        """Summarize how the detected structures are laid out on the plan.

        Args:
            structure_centers: list of (x, y) centroids.
            structure_areas: matching list of contour areas.
            img_shape: (height, width) of the analyzed image.

        Returns:
            Dict of distribution statistics; just {"structure_count": 0}
            when no structures were detected.
        """
        if len(structure_centers) < 1:
            return {"structure_count": 0}

        points = np.array(structure_centers)
        areas = np.array(structure_areas)
        structure_count = len(points)

        # Total image area. Previously this was computed only inside the
        # multi-point branch, raising NameError for a single structure.
        image_area = img_shape[0] * img_shape[1]

        # Centroid of the structure distribution.
        center_x, center_y = np.mean(points, axis=0)

        # Shape of the point cloud: ratio of covariance eigenvalues
        # (eigvalsh is the appropriate solver for a symmetric matrix).
        if len(points) > 1:
            cov = np.cov(points.T)
            eigenvalues = np.linalg.eigvalsh(cov)
            elongation = max(eigenvalues) / min(eigenvalues) if min(eigenvalues) > 0 else 0
        else:
            elongation = 0

        # Nearest-neighbor distances. With k=2 the first column is the
        # zero distance to the point itself, so column 1 is the true NN.
        if len(points) > 1:
            tree = spatial.KDTree(points)
            distances, _ = tree.query(points, k=2)
            nn_distances = distances[:, 1]
            mean_nn_distance = np.mean(nn_distances)
            std_nn_distance = np.std(nn_distances)
        else:
            mean_nn_distance = 0
            std_nn_distance = 0

        # Nearest-neighbor index (Clark-Evans style): observed mean NN
        # distance over the value expected for a uniformly random pattern.
        if len(points) > 1:
            expected_mean_distance = 0.5 / np.sqrt(structure_count / image_area)
            nni = mean_nn_distance / expected_mean_distance if expected_mean_distance > 0 else 0
        else:
            nni = 0

        # Structure-area statistics.
        mean_area = np.mean(areas) if len(areas) > 0 else 0
        std_area = np.std(areas) if len(areas) > 0 else 0
        area_ratio = np.sum(areas) / image_area if image_area > 0 else 0

        # K-means clustering (at most 4 clusters) to gauge how structures
        # group into building clusters. n_init pinned for reproducibility
        # across scikit-learn versions.
        if len(points) > 2:
            n_clusters = min(4, len(points))
            kmeans = KMeans(n_clusters=n_clusters, random_state=0, n_init=10).fit(points)
            cluster_centers = kmeans.cluster_centers_

            # Mean pairwise distance between cluster centers.
            cluster_distances = []
            for i in range(len(cluster_centers)):
                for j in range(i + 1, len(cluster_centers)):
                    dist = np.linalg.norm(cluster_centers[i] - cluster_centers[j])
                    cluster_distances.append(dist)

            mean_cluster_distance = np.mean(cluster_distances) if cluster_distances else 0
        else:
            mean_cluster_distance = 0

        return {
            "structure_count": structure_count,
            "mean_structure_area": float(mean_area),
            "std_structure_area": float(std_area),
            "area_ratio": float(area_ratio),
            "distribution_center": [float(center_x), float(center_y)],
            "distribution_elongation": float(elongation),
            "mean_nearest_neighbor_distance": float(mean_nn_distance),
            "std_nearest_neighbor_distance": float(std_nn_distance),
            "nearest_neighbor_index": float(nni),
            "mean_cluster_distance": float(mean_cluster_distance)
        }

    def calculate_global_metrics(self, contour_features, spatial_features):
        """Combine contour and spatial features into global layout indices.

        Returns:
            Dict of four indices plus their mean as "overall_score", or
            None when either input is missing or no structures were found.
        """
        if not contour_features or not spatial_features:
            return None

        # With zero structures the spatial dict lacks the distribution
        # keys used below (it is just {"structure_count": 0}), so no
        # meaningful metrics can be computed.
        if spatial_features.get("structure_count", 0) == 0:
            return None

        # Complexity: blend of outline irregularity and NN dispersion.
        complexity = (1 - contour_features["compactness"]) * 0.5 + spatial_features["nearest_neighbor_index"] * 0.5

        # Balance: peaks when fill_ratio equals the assumed ideal of 0.6.
        balance = 1 / (1 + abs(contour_features["fill_ratio"] - 0.6))

        # Concentration: closer structures -> higher score (150 px scale).
        concentration = 1 / (1 + spatial_features["mean_nearest_neighbor_distance"] / 150)

        # Richness: structure count saturating at an assumed maximum of 30.
        richness = min(1.0, spatial_features["structure_count"] / 30)

        return {
            "complexity_index": float(complexity),
            "balance_index": float(balance),
            "concentration_index": float(concentration),
            "richness_index": float(richness),
            "overall_score": float((complexity + balance + concentration + richness) / 4)
        }

    def extract_all_features(self, image_path):
        """Run the full feature pipeline on one image.

        Returns:
            Combined feature dict, or None when no usable outline or no
            structures were found.

        Raises:
            ValueError: if the image cannot be read.
        """
        _img, gray, cleaned = self.process_image(image_path)

        contour_features = self.extract_contour_features(cleaned)
        # Bail out early if there is no outline; the remaining steps
        # cannot produce a complete feature set without it.
        if contour_features is None:
            return None

        structure_centers, structure_areas, _ = self.detect_structures(cleaned)
        spatial_features = self.analyze_spatial_distribution(structure_centers, structure_areas, gray.shape)
        global_metrics = self.calculate_global_metrics(contour_features, spatial_features)
        if global_metrics is None:
            return None

        return {
            "contour_features": contour_features,
            "spatial_features": spatial_features,
            "global_metrics": global_metrics
        }

    def train(self, image_paths):
        """Extract and store features from the reference images.

        Unreadable or feature-less images are skipped with a warning
        instead of aborting the whole run. Also fits the scaler on the
        surviving feature vectors.

        Raises:
            ValueError: when no reference image produced features.
        """
        self.reference_features = []

        for path in image_paths:
            try:
                features = self.extract_all_features(path)
            except ValueError as exc:
                # Skip unreadable images; main() may pass missing paths.
                print(f"跳过参考图像 {path}: {exc}")
                continue
            if features is not None:
                self.reference_features.append(features)

        if not self.reference_features:
            raise ValueError("未能从任何参考图像中提取特征")

        # Fit the standardizer on all reference feature vectors.
        feature_vectors = [self.features_to_vector(feat) for feat in self.reference_features]
        self.scaler.fit(feature_vectors)

        print(f"成功处理 {len(self.reference_features)} 张参考图像")
        return self.reference_features

    def features_to_vector(self, features):
        """Flatten a feature dict into a 26-element numpy vector.

        Layout: 5 contour scalars, 7 Hu moments, 9 spatial statistics,
        5 global indices.
        """
        contour = features["contour_features"]
        spatial = features["spatial_features"]

        vector = [
            contour["area"],
            contour["perimeter"],
            contour["compactness"],
            contour["fill_ratio"],
            contour["elongation"],
            *contour["hu_moments"],  # 7 Hu moments
            spatial["structure_count"],
            spatial["mean_structure_area"],
            spatial["std_structure_area"],
            spatial["area_ratio"],
            spatial["distribution_elongation"],
            spatial["mean_nearest_neighbor_distance"],
            spatial["std_nearest_neighbor_distance"],
            spatial["nearest_neighbor_index"],
            spatial["mean_cluster_distance"],
            features["global_metrics"]["complexity_index"],
            features["global_metrics"]["balance_index"],
            features["global_metrics"]["concentration_index"],
            features["global_metrics"]["richness_index"],
            features["global_metrics"]["overall_score"]
        ]

        return np.array(vector)

    def calculate_similarity(self, test_image_path):
        """Score the test image against every stored reference image.

        Returns:
            Dict with per-reference similarity scores (capped at 0.99)
            and summary statistics, plus the raw test features.

        Raises:
            ValueError: if no features can be extracted from the test
            image (previously this crashed with AttributeError).
        """
        print(f"处理测试图像: {test_image_path}")
        test_features = self.extract_all_features(test_image_path)
        if test_features is None:
            raise ValueError(f"无法从测试图像提取特征: {test_image_path}")

        # Standardize the test vector with the scaler fitted in train().
        test_vector = self.features_to_vector(test_features)
        test_vector_scaled = self.scaler.transform(test_vector.reshape(1, -1))[0]

        similarities = []
        for ref_feat in self.reference_features:
            ref_vector = self.features_to_vector(ref_feat)
            ref_vector_scaled = self.scaler.transform(ref_vector.reshape(1, -1))[0]

            # Cosine similarity; NaN (all-zero vector) is treated as 0.
            cosine_sim = 1 - spatial.distance.cosine(test_vector_scaled, ref_vector_scaled)
            if np.isnan(cosine_sim):
                cosine_sim = 0

            # Euclidean distance mapped into (0, 1]; 2.5 is a scale factor.
            euclidean_dist = spatial.distance.euclidean(test_vector_scaled, ref_vector_scaled)
            euclidean_sim = 1 / (1 + euclidean_dist / 2.5)

            # Weighted blend, rescaled by 2.0 and capped at 0.99.
            combined_sim = (cosine_sim * 0.3 + euclidean_sim * 0.7) * 2.0
            combined_sim = min(combined_sim, 0.99)
            similarities.append(combined_sim)

        # Summary statistics over all reference similarities.
        mean_similarity = np.mean(similarities)
        std_similarity = np.std(similarities)
        max_similarity = np.max(similarities)
        min_similarity = np.min(similarities)
        percentile_25 = np.percentile(similarities, 25)
        percentile_50 = np.percentile(similarities, 50)  # median
        percentile_75 = np.percentile(similarities, 75)

        result = {
            "test_image": test_image_path,
            "similarity_scores": [float(s) for s in similarities],
            "mean_similarity": float(mean_similarity),
            "std_similarity": float(std_similarity),
            "max_similarity": float(max_similarity),
            "min_similarity": float(min_similarity),
            "percentile_25": float(percentile_25),
            "percentile_50": float(percentile_50),
            "percentile_75": float(percentile_75),
            "test_features": test_features
        }

        return result

    def save_results(self, results, output_path="similarity_results.json"):
        """Serialize the result dict to a JSON file.

        Args:
            results: dict returned by calculate_similarity().
            output_path: destination JSON file path.
        """

        def convert_to_serializable(obj):
            # Recursively convert numpy scalars/arrays to plain Python
            # types so json.dump can handle them.
            if isinstance(obj, np.integer):
                return int(obj)
            elif isinstance(obj, np.floating):
                return float(obj)
            elif isinstance(obj, np.ndarray):
                return obj.tolist()
            elif isinstance(obj, dict):
                return {k: convert_to_serializable(v) for k, v in obj.items()}
            elif isinstance(obj, list):
                return [convert_to_serializable(item) for item in obj]
            else:
                return obj

        serializable_results = convert_to_serializable(results)

        with open(output_path, "w") as f:
            json.dump(serializable_results, f, indent=2)

        print(f"结果已保存到 {output_path}")

    def generate_report(self, results):
        """Print a human-readable similarity report to stdout.

        Args:
            results: dict returned by calculate_similarity().
        """
        print("\n" + "=" * 50)
        print("园林平面图相似性分析报告")
        print("=" * 50)

        print(f"平均相似度: {results['mean_similarity']:.4f}")
        print(f"相似度标准差: {results['std_similarity']:.4f}")
        print(f"最高相似度: {results['max_similarity']:.4f}")
        print(f"最低相似度: {results['min_similarity']:.4f}")
        print(f"相似度中位数: {results['percentile_50']:.4f}")

        print("\n相似度分布:")
        for i, sim in enumerate(results['similarity_scores']):
            print(f"  与参考图像 {i + 1} 的相似度: {sim:.4f}")

        # Map the mean similarity onto a qualitative level.
        mean_sim = results['mean_similarity']
        if mean_sim >= 0.8:
            similarity_level = "非常高"
        elif mean_sim >= 0.6:
            similarity_level = "较高"
        elif mean_sim >= 0.4:
            similarity_level = "中等"
        elif mean_sim >= 0.2:
            similarity_level = "较低"
        else:
            similarity_level = "非常低"

        print(f"\n总体评价: 测试图像与参考图像集的相似度{similarity_level}")

        # Show the test image's own feature values.
        test_features = results['test_features']
        print(f"\n测试图像特征:")
        print(f"  轮廓面积: {test_features['contour_features']['area']:.2f}")
        print(f"  结构数量: {test_features['spatial_features']['structure_count']}")
        print(f"  复杂度指数: {test_features['global_metrics']['complexity_index']:.4f}")
        print(f"  均衡性指数: {test_features['global_metrics']['balance_index']:.4f}")
        print(f"  集中度指数: {test_features['global_metrics']['concentration_index']:.4f}")
        print(f"  丰富度指数: {test_features['global_metrics']['richness_index']:.4f}")
        print(f"  综合评分: {test_features['global_metrics']['overall_score']:.4f}")


# Usage example
def main():
    """Demo pipeline: fit on the first 10 plan images, score the last one.

    Expects images named 1.jpg .. 11.jpg in ``image_dir``. Missing files
    are skipped; the run aborts early when fewer than two images exist
    (at least one reference plus the test image are required).
    """
    analyzer = GardenSimilarityAnalyzer()

    # 11 candidate images: the first 10 are references, the 11th is the
    # test image.
    image_dir = "..\\data\\img\\"  # replace with your image directory
    image_paths = [
        os.path.join(image_dir, f"{i}.jpg") for i in range(1, 12)
    ]

    # Keep only the files that actually exist. The original version
    # warned here but then used the raw list anyway, which crashed later.
    existing_paths = [p for p in image_paths if os.path.exists(p)]
    if len(existing_paths) < 11:
        print(f"警告: 只找到 {len(existing_paths)} 张图像，需要11张")
    if len(existing_paths) < 2:
        # Nothing meaningful can be computed without at least one
        # reference image and one test image.
        print("错误: 图像数量不足，无法继续")
        return

    # Last existing image is the test image; up to 10 of the rest are
    # references (identical to the original split when all 11 exist).
    reference_paths = existing_paths[:-1][:10]
    test_path = existing_paths[-1]

    # Extract features from the reference images and fit the scaler.
    print("开始处理参考图像...")
    analyzer.train(reference_paths)

    # Score the test image against the reference set.
    print("开始计算相似性...")
    results = analyzer.calculate_similarity(test_path)

    analyzer.generate_report(results)
    analyzer.save_results(results)

    # Optional: add visualization of features / similarity results here.


# Run the demo pipeline only when executed as a script, not on import.
if __name__ == "__main__":
    main()
