import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from AudioParser import AudioParser
from HyperParm import HyperParm
from skimage.transform import resize
import librosa, torch, cv2

# Select the first CUDA device when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


class ImageHelper:
    """Static helpers for rendering and compositing spectrogram/heatmap images.

    NOTE: an earlier cv2-based ``overlay_images`` duplicate that was silently
    shadowed by the PIL-based definition (and crashed on ``cv2.addWeighted``
    with per-pixel array weights) has been removed as dead code.
    """

    @staticmethod
    def gray_to_color(image: np.ndarray, color=(0, 165, 255)):
        """
        Tint a grayscale image with a single BGR color.

        Args:
            image (np.ndarray): input single-channel grayscale image.
            color (tuple): target BGR color; defaults to orange (0, 165, 255).

        Returns:
            numpy.ndarray: the tinted 3-channel image.
        """
        # Expand the grayscale image to three identical BGR channels.
        gray_image_3ch = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)

        # Build a solid image filled with the target color.
        color_image = np.zeros_like(gray_image_3ch)
        color_image[:] = color

        # Blend the grayscale content with the solid color (fixed 0.5 weight).
        result = cv2.addWeighted(gray_image_3ch, 1.0, color_image, 0.5, 0)

        return result

    @staticmethod
    def save_mel(mel: np.ndarray, save_path: str, need_metric: bool = False):
        """Render a mel spectrogram and save it to ``save_path``.

        Args:
            mel: mel spectrogram matrix (presumably in dB — the colorbar is
                labeled "%+2.0f dB"; confirm against the producer).
            save_path: output image path.
            need_metric: when True, keep the axes and add a dB colorbar;
                otherwise save a borderless, margin-free image.
        """
        fig, ax = plt.subplots(figsize=(10, 6))
        img = librosa.display.specshow(
            mel,
            sr=HyperParm.SAMPLE_RATE,
            y_axis="mel",
            fmin=HyperParm.FMIN,
            fmax=HyperParm.FMAX,
            x_axis="time",
            ax=ax,
        )
        if need_metric:
            # plt.axis() with no arguments only queries the current limits;
            # the axes remain visible in this branch.
            plt.axis()
            fig.colorbar(img, format="%+2.0f dB")
        else:
            plt.axis("off")  # Turn off axes
            plt.tight_layout(pad=0)  # Remove padding around the plot
            plt.subplots_adjust(left=0, right=1, top=1, bottom=0)  # Remove margins
        plt.savefig(save_path)
        plt.close()

    @staticmethod
    def save_chroma(
        chroma: np.ndarray,
        save_path: str,
        sr: int = HyperParm.SAMPLE_RATE,
        need_metric: bool = False,
    ):
        """Render a chromagram and save it to ``save_path``.

        Args:
            chroma: chroma feature matrix.
            save_path: output image path.
            sr: sample rate passed to ``specshow`` for the time axis.
            need_metric: when True, keep axes and add a colorbar; otherwise
                save a borderless, margin-free image.
        """
        fig, ax = plt.subplots(figsize=(10, 6))
        img = librosa.display.specshow(
            chroma,
            sr=sr,
            y_axis="chroma",
            x_axis="time",
            ax=ax,
        )
        if need_metric:
            # plt.axis() with no arguments only queries limits; axes stay on.
            plt.axis()
            plt.tight_layout()
            fig.colorbar(img, format="%+2.0f dB")
        else:
            plt.axis("off")  # Turn off axes
            plt.tight_layout(pad=0)  # Remove padding around the plot
            plt.subplots_adjust(left=0, right=1, top=1, bottom=0)  # Remove margins
        plt.savefig(save_path)
        plt.close()

    @staticmethod
    def save_spectral_contrast(
        contrast: np.ndarray,
        save_path: str,
        sr: int = HyperParm.SAMPLE_RATE,
        need_metric: bool = False,
    ):
        """Render a spectral-contrast matrix and save it to ``save_path``.

        Args:
            contrast: spectral contrast feature matrix.
            save_path: output image path.
            sr: sample rate passed to ``specshow`` for the time axis.
            need_metric: when True, keep axes and add a colorbar; otherwise
                save a borderless, margin-free image.
        """
        fig, ax = plt.subplots(figsize=(10, 6))
        img = librosa.display.specshow(
            contrast,
            sr=sr,
            x_axis="time",
            ax=ax,
        )
        if need_metric:
            # plt.axis() with no arguments only queries limits; axes stay on.
            plt.axis()
            plt.tight_layout()
            fig.colorbar(img, format="%+2.0f dB")
        else:
            plt.axis("off")  # Turn off axes
            plt.tight_layout(pad=0)  # Remove padding around the plot
            plt.subplots_adjust(left=0, right=1, top=1, bottom=0)  # Remove margins
        plt.savefig(save_path)
        plt.close()

    @staticmethod
    def save_cam(cam: np.ndarray, save_path: str, alpha: float = 0.5):
        """Save a Grad-CAM heatmap as a semi-transparent BGRA PNG.

        Args:
            cam: heatmap with values normalized to [0, 1] (scaled to uint8
                here — values outside [0, 1] would wrap; caller normalizes).
            save_path: output image path (should be PNG to keep alpha).
            alpha: uniform opacity of the saved overlay, in [0, 1].
        """
        heatmap = np.uint8(255 * cam)
        color_heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
        # Append a constant alpha channel so the PNG composites as an overlay.
        color_heatmap_with_alpha = cv2.merge(
            [color_heatmap, np.full(heatmap.shape, 255 * alpha, dtype=np.uint8)]
        )
        cv2.imwrite(save_path, color_heatmap_with_alpha)

    @staticmethod
    def overlay_images(image1_path, image2_path, output_path):
        """Alpha-composite ``image2`` over ``image1`` and save the result.

        Args:
            image1_path: path of the base image.
            image2_path: path of the overlay image (resized to the base size).
            output_path: path of the output PNG.
        """
        # Open both images as RGBA so alpha compositing is well-defined.
        image1 = Image.open(image1_path).convert("RGBA")
        image2 = Image.open(image2_path).convert("RGBA")

        # Scale the overlay to the base image's size.
        image2 = image2.resize(image1.size)

        # Composite the overlay on top of the base and save as PNG.
        combined = Image.alpha_composite(image1, image2)
        combined.save(output_path, "PNG")


# 定义Grad-CAM类
class GradCAM:
    """Grad-CAM visualizer for a ResNet-style classifier on audio features.

    Hooks ``model.layer4[-1].conv3`` to capture forward activations and
    backward gradients, then produces class-activation heatmaps for audio
    split into mel / chroma / spectral-contrast representations.
    """

    def __init__(self, model):
        self.model = model.eval()
        self.features = None   # activations captured by the forward hook
        self.gradients = None  # gradients captured by the backward hook

        # Register hooks on the target layer.
        # NOTE(review): assumes a torchvision ResNet-50-style model
        # (layer4[-1].conv3 exists) — confirm against the checkpoint.
        target_layer = self.model.layer4[-1].conv3
        target_layer.register_forward_hook(self.save_features)
        # NOTE(review): register_backward_hook is deprecated in recent
        # PyTorch; register_full_backward_hook is the intended replacement
        # for this single-branch conv layer — confirm before switching.
        target_layer.register_backward_hook(self.save_gradients)

        # Helper that loads and featurizes audio files.
        self.audioParser = AudioParser()

    def save_features(self, module, input, output):
        # Forward hook: stash the layer's output activations. (1, C, H, W)
        self.features = output.detach()

    def save_gradients(self, module, grad_input, grad_output):
        # Backward hook: stash the gradient w.r.t. the layer output. (1, C, H, W)
        self.gradients = grad_output[0].detach()

    def forward(self, x):
        """Run the wrapped model on ``x`` and return its raw outputs."""
        return self.model(x)

    def backward(self, outputs, target_class):
        """Backpropagate the score of ``target_class`` to fill the hooks."""
        self.model.zero_grad()
        loss = outputs[:, target_class].sum()
        loss.backward(retain_graph=True)

    def get_cam(self):
        """Build the (unnormalized) Grad-CAM heatmap from the captured hooks.

        Returns:
            numpy.ndarray: non-negative heatmap of shape (H, W).
        """
        features = self.features[0]  # (C, H, W)
        gradients = self.gradients[0]  # (C, H, W)

        # Channel weights: global-average-pool the gradients.
        weights = torch.mean(gradients, dim=[1, 2])  # (C,)

        # Weighted sum over channels — vectorized instead of a Python loop,
        # and computed on the features' own device (no CPU round-trip).
        cam = (weights[:, None, None] * features).sum(dim=0)  # (H, W)

        # Keep only positive contributions (standard Grad-CAM ReLU).
        cam = torch.relu(cam)
        # Normalization is deferred: get_pic normalizes the concatenated
        # heatmap globally across all splits.
        return cam.to("cpu").numpy()

    def get_pic(self, data_path: str):
        """Compute heatmap + feature images for the audio file at ``data_path``.

        Returns:
            tuple: (heatmap, mel, chroma, spectral_contrast), each a 2-D
            array concatenated along time over all audio splits; the heatmap
            is normalized to [0, 1].
        """
        self.audioParser.load_audio(data_path)
        splitted_data = self.audioParser.split_audio(HyperParm.N_SPLIT)

        heatmaps = []
        mels = []
        chromas = []
        spectral_contrasts = []

        for data in splitted_data:
            mels.append(self.audioParser.get_mel(data))
            audio_mel = resize(mels[-1], HyperParm.PIC_SIZE)

            chromas.append(self.audioParser.get_chroma(data))
            audio_chroma = resize(chromas[-1], HyperParm.PIC_SIZE)

            spectral_contrasts.append(self.audioParser.get_spectral_contrast(data))
            audio_spectral_contrast = resize(spectral_contrasts[-1], HyperParm.PIC_SIZE)

            # Stack the three feature images as channels and reshape to the
            # model's expected input size.
            audio_res = torch.tensor(
                np.stack(
                    [audio_mel, audio_chroma, audio_spectral_contrast], axis=0
                ).reshape(-1, *HyperParm.DATA_SIZE),
                dtype=torch.float32,
            ).to(device)
            outputs = self.forward(audio_res)

            # Use the predicted class as the Grad-CAM target.
            _, pred_class = torch.max(outputs, 1)
            target_class = pred_class.item()

            # Backprop the target score, then read the CAM from the hooks.
            self.backward(outputs, target_class)
            heatmaps.append(self.get_cam())

        # Concatenate per-split heatmaps along time and min-max normalize
        # globally; guard against a constant heatmap (zero range).
        heatmap = np.concatenate(heatmaps, axis=1)
        heatmap_range = heatmap.max() - heatmap.min()
        if heatmap_range > 0:
            heatmap = (heatmap - heatmap.min()) / heatmap_range
        else:
            heatmap = np.zeros_like(heatmap)

        mel = np.concatenate(mels, axis=1)
        chroma = np.concatenate(chromas, axis=1)
        spectral_contrast = np.concatenate(spectral_contrasts, axis=1)

        return heatmap, mel, chroma, spectral_contrast


def visualize(model_path: str, data_path: str, save_prefix: str):
    """Run Grad-CAM on an audio file and save feature + overlay images.

    Args:
        model_path: path to a serialized PyTorch model.
        data_path: path to the input audio file.
        save_prefix: path prefix for all output PNGs
            (e.g. ``{prefix}_mel.png``, ``{prefix}_cam_mel.png``).
    """
    # map_location lets a GPU-trained checkpoint load on a CPU-only host.
    # NOTE(review): weights_only=False unpickles arbitrary objects — only
    # load checkpoints from trusted sources.
    model = torch.load(
        model_path,
        map_location=device,
        weights_only=False,
    )
    grad_cam_worker = GradCAM(model)

    cam, mel, chroma, spectral_contrast = grad_cam_worker.get_pic(data_path)

    # Save the individual feature images and the heatmap.
    ImageHelper.save_mel(mel, save_path=f"{save_prefix}_mel.png")
    ImageHelper.save_chroma(
        chroma,
        save_path=f"{save_prefix}_chroma.png",
    )
    ImageHelper.save_spectral_contrast(
        spectral_contrast,
        save_path=f"{save_prefix}_spectral_contrast.png",
    )
    ImageHelper.save_cam(cam, f"{save_prefix}_heatmap.png")

    # Composite the heatmap over each feature image.
    ImageHelper.overlay_images(
        f"{save_prefix}_mel.png",
        f"{save_prefix}_heatmap.png",
        f"{save_prefix}_cam_mel.png",
    )
    ImageHelper.overlay_images(
        f"{save_prefix}_chroma.png",
        f"{save_prefix}_heatmap.png",
        f"{save_prefix}_cam_chroma.png",
    )
    ImageHelper.overlay_images(
        f"{save_prefix}_spectral_contrast.png",
        f"{save_prefix}_heatmap.png",
        f"{save_prefix}_cam_spectral_contrast.png",
    )


if __name__ == "__main__":
    # Demo entry point with hard-coded paths; model_path is an empty
    # placeholder — supply a real checkpoint path before running.
    visualize(
        model_path="",
        data_path="/home/tuchunxu/workspace/pr-project/pr-project/data/test/classical/classical.00028.wav",
        save_prefix="/home/tuchunxu/workspace/pr-project/pr-project/pic/tmp/test",
    )
