import os
import numpy as np
from spectral import *
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import numpy as np
from nimfa import Nmf
import io
from sklearn.decomposition import NMF
from ObjectGenerator_depressed import CrownShape, LAD, LeafShape, CrownGenerator


def calculate_sad(original, reconstructed):
    """
    Compute the mean Spectral Angle Distance (SAD) between endmember spectra.

    Parameters:
        - original: original endmember spectra; a 2-D array indexed [j, k]
          below (presumably endmember x band — TODO confirm with callers).
        - reconstructed: reconstructed endmember spectra; a 2-D array whose
          axis 1 must match ``original``'s axis 0.

    Returns:
        - sad: scalar, the mean of arccos(cosine similarity) taken over
          axis 0 of ``reconstructed``.

    NOTE(review): the einsum 'ijk,ij->i' sums ``original`` over its k axis
    before dotting with ``reconstructed`` — this is NOT the usual per-band
    spectral-angle dot product between two equal-length spectra; confirm
    this contraction is intentional.
    """
    # Tile the original spectra along a new axis 0 so one copy lines up
    # with each row of ``reconstructed``
    original_repeated = np.repeat(original[np.newaxis, :, :], reconstructed.shape[0], axis=0)

    # Per-sample dot product (contracts j, sums k — see NOTE above)
    dot_product = np.einsum('ijk,ij->i', original_repeated, reconstructed)

    # Per-sample norms; norms_original is the full Frobenius norm of
    # ``original`` repeated once per sample
    norms_original = np.linalg.norm(original_repeated, axis=(1, 2))
    norms_reconstructed = np.linalg.norm(reconstructed, axis=1)
    cos_similarity = dot_product / (norms_original * norms_reconstructed)

    sad = np.mean(np.arccos(cos_similarity))

    return sad


class UtilD:
    # Get the distinct categories contained in an array
    @staticmethod
    def getCategoryArr(arr):
        res = []
        for item in arr:
            if item[0] not in res:
                res.append(item[0])
        return res

    @staticmethod
    def classifyKmeans(fileUrl, result_folder, uuid):
        """
        Cluster a hyperspectral image into 3 classes with K-means and export
        the result image plus per-class pixel-coordinate files.

        Parameters:
            - fileUrl: path to the ENVI header of the hyperspectral image.
            - result_folder: directory where all outputs are written.
            - uuid: suffix appended to the result image file name.

        Returns:
            dict mapping 'all'/'house'/'grass'/'tree01'/'img' to output
            paths relative to ``result_folder``.
        """
        img = open_image(fileUrl)
        data = img.load()
        # Flatten (rows, cols, bands) -> (rows*cols, bands) for clustering
        pixels = data.reshape(-1, data.shape[-1])

        # K-means with a fixed cluster count and seed so the label->class
        # mapping below stays reproducible
        n_clusters = 3
        kmeans = KMeans(n_clusters=n_clusters, random_state=0).fit(pixels)
        labels = kmeans.labels_.reshape(data.shape[:-1])

        # Save the clustering result image and close the figure so no state
        # leaks into later plots.  (A previous dead code path drew legend
        # text onto a fresh implicit figure AFTER plt.close(), leaking an
        # unsaved figure — removed.)
        plt.imshow(labels, cmap='viridis')
        plt.title('K-means Clustering Result')
        plt.colorbar(label='Cluster Labels')
        result_image_path = os.path.join(result_folder, 'cluster_result' + uuid + '.png')
        plt.savefig(result_image_path)
        plt.close()

        # Cluster id -> scene-object name used by the downstream generator
        class_names = {0: 'house', 1: 'grass', 2: 'tree01'}

        # Folder holding one coordinate file per class
        class_coords_folder = os.path.join(result_folder, 'class_cluster_coords')
        os.makedirs(class_coords_folder, exist_ok=True)

        # Pixel coordinates belonging to each cluster
        cluster_coords = {i: np.argwhere(labels == i) for i in range(n_clusters)}

        file_path_all = os.path.join(result_folder, 'all_cluster_coords.txt')
        with open(file_path_all, 'w') as all_coords_file:
            for i, coords in cluster_coords.items():
                # Thin the point set: every 13th point for 'house'/'tree01',
                # every 5th point for 'grass'
                if i == 0 or i == 2:
                    sparse_coords = coords[::13]
                elif i == 1:  # 'grass'
                    sparse_coords = coords[::5]
                else:
                    sparse_coords = coords  # all points for other clusters

                # One coordinate file per class, plus the combined file
                file_path_class = os.path.join(class_coords_folder, f'class_{class_names[i]}_coords.txt')
                with open(file_path_class, 'w') as class_coords_file:
                    for coord in sparse_coords:
                        line = f'{class_names[i]} {coord[0]:.1f} {coord[1]:.1f} 0.0 0.0\n'
                        class_coords_file.write(line)
                        all_coords_file.write(line)

        return {"all": "all_cluster_coords.txt",
                "house": "class_cluster_coords/class_house_coords.txt",
                "grass": "class_cluster_coords/class_grass_coords.txt",
                "tree01": "class_cluster_coords/class_tree01_coords.txt",
                "img": 'cluster_result' + uuid + '.png', }

    @staticmethod
    def MaximumAbundanceClassification(fileUrl, result_folder, uuid):
        """
        Classify each pixel by its maximum-abundance band and export the
        classified image plus sparse tree pixel coordinates.

        Parameters:
            - fileUrl: path to the ENVI header of the abundance cube.
            - result_folder: directory where outputs are written.
            - uuid: suffix appended to the result image file name.

        Returns:
            dict with 'tree' (coordinate file) and 'img' (result image),
            both relative to ``result_folder``.
        """
        img = open_image(fileUrl)
        data = img.load()
        # Band i of the cube holds the abundance of class i.  Bands are
        # 0-indexed; the previous code read band i+1, which skipped band 0
        # and read one band past the end of the cube.
        abundance_data = np.zeros((data.shape[-1], data.shape[0], data.shape[1]))
        for i in range(data.shape[-1]):
            abundance_band = data[:, :, i]
            if abundance_band.size > 0:
                abundance_data[i] = abundance_band

        # Per-pixel maximum-abundance classification
        classified_image = np.argmax(abundance_data, axis=0)
        plt.imshow(classified_image, cmap='jet')
        plt.colorbar()
        result_image_path = os.path.join(result_folder, 'MAC_result' + uuid + '.png')
        plt.savefig(result_image_path)
        plt.close()  # release the figure so later plots start clean

        # Export positions of pixels whose class id is in [100, 145),
        # keeping every 7th point to thin the output
        file_path_all = os.path.join(result_folder, 'MAC_tree.txt')
        with open(file_path_all, 'w') as f:
            for class_id in range(100, 145):
                positions = np.argwhere(classified_image == class_id)
                sparse_positions = positions[::7]

                for pos in sparse_positions:
                    f.write(f"tree01 {pos[0]} {pos[1]} 0.0 0.0\n")

        return {"tree": "MAC_tree.txt",
                "img": 'MAC_result' + uuid + '.png', }

    @staticmethod
    def Fourcomponent(fileUrl, result_folder, uuid):
        """
        Visualize a four-component classification cube: band 1 is the
        component class label, bands 2-5 are the four endmember abundance
        fractions.

        Parameters:
            - fileUrl: path to the ENVI header of the cube.
            - result_folder: directory where the result image is written.
            - uuid: suffix appended to the result image file name.

        Returns:
            dict with 'img', the result image name relative to
            ``result_folder``.
        """
        img = open_image(fileUrl)
        data = img.load()
        class_label = data[:, :, 0]             # band 1: class label
        endmember_proportions = data[:, :, 1:]  # bands 2-5: abundances

        plt.figure(figsize=(12, 8))

        # Panel 1: the four-component class map
        plt.subplot(2, 3, 1)
        plt.imshow(class_label, cmap='viridis')
        plt.colorbar()
        plt.title('Component Class')

        # Panels 2-5: one abundance map per endmember (previously four
        # copy-pasted subplot stanzas)
        titles = ('Bright Soil Proportion',
                  'Bright Vegetation Proportion',
                  'Shadow Soil Proportion',
                  'Shadow Vegetation Proportion')
        for i, title in enumerate(titles):
            plt.subplot(2, 3, i + 2)
            plt.imshow(endmember_proportions[:, :, i], cmap='viridis')
            plt.colorbar()
            plt.title(title)

        result_image_path = os.path.join(result_folder, 'Fourcomponent_result' + uuid + '.png')
        plt.savefig(result_image_path)
        plt.close()  # avoid leaking the figure into subsequent plots
        return {"img": 'Fourcomponent_result' + uuid + '.png', }

    @staticmethod
    def HyperspectralView(fileUrl, path, image_Url, index):
        """
        Save one band of a hyperspectral image as a grayscale PNG.

        Parameters:
            - fileUrl: path to the ENVI header of the image.
            - path: fallback output directory (kept for interface
              compatibility; the image is written to ``image_Url``).
            - image_Url: full path of the PNG to write.
            - index: 1-based band number to render.

        Returns:
            dict with 'status' (False when ``index`` is out of range) and
            the total 'num_bands'.
        """
        hdr_image = open_image(fileUrl)
        hdr_data = hdr_image.load()
        num_bands = hdr_data.shape[2]
        print("波段数量", num_bands)
        # Reject out-of-range band numbers.  The previous code only checked
        # the upper bound, so index 0 (band_idx -1) silently rendered the
        # LAST band.
        if index < 1 or index > num_bands:
            return {"status": False, "num_bands": num_bands}
        band_idx = index - 1

        # Render ONLY the requested band; the previous code rendered every
        # band and kept all of them in memory to use a single one.
        plt.imshow(hdr_data[:, :, band_idx], cmap='gray')
        plt.axis('off')
        plt.title(f'Band {band_idx + 1}')
        buf = io.BytesIO()
        plt.savefig(buf, format='png', bbox_inches='tight', pad_inches=0.1)
        plt.close()
        buf.seek(0)

        # Ensure the output directory exists before writing.  (A previous
        # hard-coded debug directory that shadowed ``path`` was removed.)
        target_dir = os.path.dirname(image_Url) or path
        if target_dir and not os.path.exists(target_dir):
            os.makedirs(target_dir)
        with open(image_Url, "wb+") as f:
            f.write(buf.getvalue())
        return {"status": True, "num_bands": num_bands}

    @staticmethod
    def WindChartView(fileUrl, file_path_name):
        """
        Unmix a hyperspectral image with NMF and save a grid of per-pixel
        endmember-abundance maps.

        Parameters:
            - fileUrl: path to the ENVI header of the image.
            - file_path_name: path of the PNG to write.

        Returns:
            file_path_name.
        """
        # Load the hyperspectral cube
        hdr_image = open_image(fileUrl)
        data = hdr_image.load()

        # (rows, cols, bands) -> (rows*cols, bands) for factorization
        X = data.reshape(-1, data.shape[-1])

        # NMF unmixing; assume four endmembers unless the cube has fewer bands
        num_endmembers = min(4, data.shape[2])
        nmf = NMF(n_components=num_endmembers, init='nndsvd', random_state=0)
        endmember_proportions = nmf.fit_transform(X)

        # Back to image shape: (rows, cols, num_endmembers)
        endmember_proportions_img = endmember_proportions.reshape(data.shape[:-1] + (num_endmembers,))

        # One abundance map per endmember
        plt.figure(figsize=(12, 8))
        for i in range(num_endmembers):
            plt.subplot(2, 2, i + 1)
            plt.imshow(endmember_proportions_img[:, :, i], cmap='viridis')
            plt.colorbar()
            plt.title(f'Endmember {i + 1} Proportion')
        print(file_path_name)
        plt.savefig(file_path_name, bbox_inches='tight', pad_inches=0.1)
        plt.close()  # free the figure so later plots start clean
        return file_path_name

    @staticmethod
    def Endelement(fileUrl, file_path_name, x, y):
        """
        Plot the reflectance spectrum ("endmember curve") of a single pixel
        and save it as a PNG.

        Parameters:
            - fileUrl: path to the ENVI header of the image.
            - file_path_name: path of the PNG to write.
            - x, y: pixel position used as (row, col) — supplied by the
              caller (e.g. the front end).

        Returns:
            file_path_name.
        """
        hdr_image = open_image(fileUrl)
        hdr_data = hdr_image.load()

        print(hdr_image.ncols, hdr_image.nrows, hdr_data.shape)
        row, col = x, y

        # Spectrum at the requested pixel, flattened to 1-D
        reflectance_curve = hdr_data[row, col, :].flatten()

        # Band-center wavelengths from the header; fall back to plain band
        # indices when the header carries no wavelength information
        # (``bands.centers`` is None in that case and plotting would fail)
        wavelength_range = hdr_image.bands.centers
        if wavelength_range is None:
            wavelength_range = range(len(reflectance_curve))

        # Draw the endmember curve
        plt.figure(figsize=(8, 6))
        plt.plot(wavelength_range, reflectance_curve, color='b')
        plt.title('Endmember Curve')
        plt.xlabel('Wavelength (nm)')
        plt.ylabel('Reflectance')
        plt.grid(True)

        # Save the figure, then release it
        print(file_path_name)
        plt.savefig(file_path_name, bbox_inches='tight', pad_inches=0.1)
        plt.close()
        return file_path_name

    @staticmethod
    def nmtf_and_plot_endmember_spectra(hdr_image_path, file_path_name):
        """
        Unmix a hyperspectral image with nimfa's NMF and plot the extracted
        endmember spectral curves.

        Parameters:
            - hdr_image_path: path to the ENVI header of the image.
            - file_path_name: path of the PNG to write.

        Returns:
            file_path_name.
        """
        # Load the hyperspectral cube
        hdr_image = open_image(hdr_image_path)
        data = hdr_image.load()

        # (rows, cols, bands) -> (rows*cols, bands)
        X = data.reshape(-1, data.shape[-1])

        # Assume three endmembers unless the cube has fewer bands
        num_components = min(3, data.shape[2])
        nmtf = Nmf(X, seed="nndsvd", max_iter=200, update='euclidean', rank=num_components, track_error=True)
        nmtf_fit = nmtf()

        # With X of shape (pixels, bands): basis() is (pixels, rank) pixel
        # abundances and coef() is (rank, bands) endmember spectra.  The
        # previous code had the two names swapped, so the [0, 1]
        # normalization was applied to the unused abundance matrix while
        # unnormalized coef() rows were plotted.
        endmember_abundances = np.array(nmtf_fit.basis())
        endmember_spectra = np.array(nmtf_fit.coef())

        # NMF can leave small negative values; clamp them to zero
        endmember_abundances[endmember_abundances < 0] = 0
        endmember_spectra[endmember_spectra < 0] = 0

        # Normalize each spectral curve to [0, 1]; guard all-zero rows
        # against division by zero
        max_values = np.max(endmember_spectra, axis=-1, keepdims=True)
        max_values[max_values == 0] = 1
        endmember_spectra /= max_values

        # Plot one curve per endmember
        plt.figure(figsize=(10, 6))
        for i in range(num_components):
            plt.plot(endmember_spectra[i], label=f'Endmember {i + 1}')

        plt.title('Endmember Spectral Curves (NMTF)')
        plt.xlabel('Band')
        plt.ylabel('Reflectance')
        plt.legend()
        plt.grid(True)

        # Save the figure, then release it
        print(file_path_name)
        plt.savefig(file_path_name, bbox_inches='tight', pad_inches=0.1)
        plt.close()
        return file_path_name
