import math

import torch
import torch.nn as nn
import torch.nn.functional as F
from mmseg.utils.sam_tools import visualize_img_, normalize_array, visualize_3d_score
from mmcv.image import tensor2imgs
from typing import Union, List, Tuple


class RobustMinMaxNorm:
    def __init__(self,
                 epsilon: float = 1e-8,
                 feature_axes: Union[int, List[int], Tuple[int]] = None,
                 clamp_output: bool = False):
        """
        Robust min-max normalization utility.

        Args:
            epsilon: small constant guarding against division by zero (default 1e-8)
            feature_axes: dimensions to reduce over (None means global normalization)
                          Example:
                          - image data: feature_axes=(2,3) normalizes over H,W
                          - sequence data: feature_axes=(0,1) normalizes over batch and time
            clamp_output: whether to clamp the output into [0,1] (guards against outliers)
        """
        self.epsilon = epsilon
        self.feature_axes = feature_axes
        self.clamp_output = clamp_output

    def __call__(self, data: torch.Tensor) -> torch.Tensor:
        # Infer reduction dims: all dims for global normalization.
        if self.feature_axes is None:
            reduce_dims = tuple(range(data.dim()))
        else:
            reduce_dims = self.feature_axes

        # torch.Tensor.min/max(dim=...) only accept a single int dim;
        # amin/amax accept int or tuple, which multi-axis reduction needs.
        data_min = torch.amin(data, dim=reduce_dims, keepdim=True)
        data_max = torch.amax(data, dim=reduce_dims, keepdim=True)

        # Safe scale: a (near-)zero range would divide by ~0, so replace it
        # with 1.0 — constant regions then normalize to 0.
        scale = data_max - data_min
        scale = torch.where(scale < self.epsilon, torch.ones_like(scale), scale)

        normalized = (data - data_min) / scale

        # Optional outlier clipping.
        if self.clamp_output:
            normalized = torch.clamp(normalized, 0.0, 1.0)

        return normalized


class SafePooling(nn.Module):
    """Stride-1 pooling that preserves spatial size via reflection padding.

    Supports 'max', 'min' and 'avg' pooling over a square window. Minimum
    pooling is realised by max-pooling the negated input and negating the
    result back.
    """

    def __init__(self, pool_type='max', kernel_size=3):
        super().__init__()
        # An odd kernel is required so the padding is symmetric.
        assert kernel_size % 2 == 1, "kernel_size must be odd"
        assert pool_type in ('max', 'min', 'avg'), "Unsupported pool type"

        self.kernel_size = kernel_size
        self.pool_type = pool_type

        half = (kernel_size - 1) // 2
        self.pad = nn.ReflectionPad2d(half)

        # 'min' reuses the max-pool operator (applied to the negated input).
        if pool_type == 'avg':
            self.pool = nn.AvgPool2d(kernel_size, stride=1, padding=0,
                                     count_include_pad=False)
        else:
            self.pool = nn.MaxPool2d(kernel_size, stride=1, padding=0)

    def forward(self, x):
        padded = self.pad(x)
        if self.pool_type == 'min':
            return -self.pool(-padded)  # min(x) == -max(-x)
        return self.pool(padded)


class RegionRelateEntropyScore(nn.Module):
    """Score per-pixel uncertainty from region statistics of a logit map.

    Two statistics are computed over a ``size`` x ``size`` window around each
    pixel:

    * region impurity — entropy of the predicted-class histogram inside the
      window (high where several classes meet);
    * region relative entropy — window sum of
      ``pixel_entropy * log(pixel_entropy / region_mean_entropy)``, a
      KL-divergence-like term measuring how much each pixel's entropy
      deviates from its window's mean entropy.

    The returned score is a harmonic combination of the two after global
    min-max normalization.
    """

    def __init__(self, in_channels=19, padding_mode='zeros', size=33):
        """
        Args:
            in_channels: number of classes in the logit map.
            padding_mode: padding mode of the box-sum convolutions.
            size: odd side length of the square window
                  (purity_conv / entropy_conv kernels are size*size).
        """
        super(RegionRelateEntropyScore, self).__init__()
        self.in_channels = in_channels
        assert size % 2 == 1, "error size"
        # Depthwise (groups=in_channels) all-ones convolution: each output
        # channel is the box sum of one class's one-hot plane, i.e. the
        # per-class pixel count inside the window.
        self.purity_conv = nn.Conv2d(in_channels=in_channels, out_channels=in_channels, kernel_size=size,
                                     stride=1, padding=size // 2, bias=False,
                                     padding_mode=padding_mode, groups=in_channels)
        self.purity_conv.weight = nn.Parameter(
            torch.ones((in_channels, 1, size, size), dtype=torch.float32))
        self.purity_conv.requires_grad_(False)

        # Single-channel all-ones convolution: box sum of a scalar map.
        self.entropy_conv = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=size,
                                      stride=1, padding=size // 2, bias=False,
                                      padding_mode=padding_mode)
        self.entropy_conv.weight = nn.Parameter(
            torch.ones((1, 1, size, size), dtype=torch.float32))
        self.entropy_conv.requires_grad_(False)

        # Size-preserving pooling helpers; kept as public attributes for
        # external use / alternative score formulations.
        self.avg_fn = SafePooling('avg', size)
        self.max_fn = SafePooling('max', size)
        self.min_fn = SafePooling('min', size)

    def _cal_harmonic_avg(self, data1, data2):
        # NOTE: this is (a*b)/(a+b) = harmonic_mean / 2; the factor of 2 is
        # omitted deliberately since only the score ordering matters.
        return (data1 * data2) / (data1 + data2 + 1e-9)

    def normalize_score(self, data):
        """Global min-max normalization to [0, 1] (eps-guarded)."""
        data = (data - data.min()) / (data.max() - data.min() + 1e-9)
        return data

    def forward(self, logit):
        """Compute the region uncertainty score.

        Args:
            logit: class logits, expected shape [1, C, H, W] (batch of 1).

        Returns:
            Tuple of [H, W] tensors: (score, region_impurity, region_entropy).
        """
        logit = logit.squeeze(dim=0)  # [C, h, w]
        p = torch.softmax(logit, dim=0)  # [C, h, w]

        # Per-pixel Shannon entropy, scaled into [0, 1] by log(C).
        pixel_entropy = torch.sum(-p * torch.log(p + 1e-6), dim=0).unsqueeze(dim=0).unsqueeze(dim=0) / math.log(self.in_channels)  # [1, 1, h, w]

        predict = torch.argmax(p, dim=0)  # [h, w]
        one_hot = F.one_hot(predict, num_classes=self.in_channels).float()
        one_hot = one_hot.permute((2, 0, 1)).unsqueeze(dim=0)  # [1, C, h, w]
        summary = self.purity_conv(one_hot)  # per-class counts per window, [1, C, h, w]
        count = torch.sum(summary, dim=1, keepdim=True)  # valid pixels per window, [1, 1, h, w]
        dist = summary / count  # class distribution per window, [1, C, h, w]
        # Entropy of the window's class histogram, normalized by log(C).
        region_impurity = torch.sum(-dist * torch.log(dist + 1e-6), dim=1, keepdim=True) / math.log(self.in_channels)  # [1, 1, h, w]
        # Mean pixel entropy per window.
        region_entropy = self.entropy_conv(pixel_entropy) / count  # [1, 1, h, w]
        # Window-averaged KL-like deviation of each pixel's entropy from the
        # window mean entropy.
        region_relate_entropy = self.entropy_conv(pixel_entropy * torch.log(pixel_entropy / region_entropy + 1e-6)) / count

        # Final score: harmonic combination of the two normalized statistics.
        score = self._cal_harmonic_avg(self.normalize_score(region_impurity), self.normalize_score(region_relate_entropy))
        return (score.squeeze(dim=0).squeeze(dim=0),
                region_impurity.squeeze(dim=0).squeeze(dim=0),
                region_entropy.squeeze(dim=0).squeeze(dim=0))

    def cal_impurity_by_predict(self, predict):
        """Region impurity from a hard prediction map.

        Args:
            predict: [H, W] long tensor of class indices.

        Returns:
            [H, W] region impurity. NOTE: unlike forward(), this value is
            intentionally NOT normalized by log(in_channels).
        """
        one_hot = F.one_hot(predict, num_classes=self.in_channels).float()
        one_hot = one_hot.permute((2, 0, 1)).unsqueeze(dim=0)  # [1, C, h, w]
        summary = self.purity_conv(one_hot)  # [1, C, h, w]
        count = torch.sum(summary, dim=1, keepdim=True)  # [1, 1, h, w]
        dist = summary / count  # [1, C, h, w]
        region_impurity = torch.sum(-dist * torch.log(dist + 1e-6), dim=1, keepdim=True)  # [1, 1, h, w]
        return region_impurity.squeeze()  # [h, w]
