import torch
from torch import nn
from torch.nn import functional as F
import math

'''
segment_image was AI-generated (by the 'Doubao' assistant).
Goal: given a distance threshold, assign every pixel to a region by grouping
connected (8-neighbour) pixels whose embedding distance is below the
threshold — a block-partitioning strategy.
Original prompt (translated): "Given an image's embedding representation
(b c h w), can you partition the image into blocks using Euclidean distance,
grouping connected pixels together whenever their similarity is within a
given threshold?"
'''
def segment_image(embedding, threshold):
    """Partition an image into connected regions of mutually similar pixels.

    Two 8-neighbour pixels are linked when the Euclidean distance between
    their embedding vectors is strictly below ``threshold``; the connected
    components of the resulting graph become the regions.

    Args:
        embedding: tensor of shape (1, c, h, w); only batch size 1 is supported.
        threshold: float distance cutoff (strict ``<`` comparison).

    Returns:
        LongTensor of shape (h, w) with dense region labels in
        ``[0, num_regions)``.
    """
    b, c, h, w = embedding.shape
    assert b == 1, "Only batch size 1 supported"
    device = embedding.device
    embedding = embedding[0]  # (c, h, w)

    # Flatten to one vector per pixel: (N, c)
    pixels = embedding.view(c, -1).T
    N = h * w

    # Build index pairs for the 8-neighbourhood.
    idx = torch.arange(N, device=device).reshape(h, w)
    directions = [(-1, -1), (-1, 0), (-1, 1),
                  (0, -1),          (0, 1),
                  (1, -1),  (1, 0), (1, 1)]

    neighbors = []
    for dy, dx in directions:
        y1 = torch.arange(h, device=device)
        x1 = torch.arange(w, device=device)
        y2 = y1 + dy
        x2 = x1 + dx

        # Keep only rows/cols whose shifted counterpart stays in bounds.
        valid_y = (y2 >= 0) & (y2 < h)
        valid_x = (x2 >= 0) & (x2 < w)
        if not valid_y.any() or not valid_x.any():
            continue

        y1, y2 = y1[valid_y], y2[valid_y]
        x1, x2 = x1[valid_x], x2[valid_x]

        # Broadcasting pairs every valid (y, x) with its (y+dy, x+dx)
        # neighbour: src[i, j] = idx[y1[i], x1[j]], dst[i, j] = idx[y2[i], x2[j]].
        src = idx[y1[:, None], x1].reshape(-1)
        dst = idx[y2[:, None], x2].reshape(-1)
        neighbors.append(torch.stack([src, dst], dim=1))

    if not neighbors:
        # Degenerate 1x1 image: no neighbour pairs exist; single region.
        return torch.zeros(h, w, dtype=torch.long, device=device)

    neighbors = torch.cat(neighbors, dim=0)  # (M, 2)

    # Keep only edges whose endpoint embeddings are close enough.
    dist = torch.norm(pixels[neighbors[:, 0]] - pixels[neighbors[:, 1]], dim=1)
    connected = neighbors[dist < threshold]  # (K, 2)

    # Connected components via min-label propagation with pointer jumping.
    # Unlike a fixed number of sweeps, iterating to a fixed point guarantees
    # that every component collapses to a single label regardless of its
    # shape or diameter. Labels are monotonically non-increasing and bounded
    # below, so the loop always terminates.
    labels = torch.arange(N, device=device)
    if connected.numel() > 0:
        u, v = connected[:, 0], connected[:, 1]
        while True:
            prev = labels
            m = torch.minimum(labels[u], labels[v])
            # amin scatter handles duplicate indices deterministically,
            # unlike plain index assignment.
            labels = labels.scatter_reduce(0, u, m, reduce='amin')
            labels = labels.scatter_reduce(0, v, m, reduce='amin')
            labels = labels[labels]  # pointer jumping (path compression)
            if torch.equal(labels, prev):
                break

    # Re-number the surviving labels densely as 0..num_regions-1.
    _, labels = torch.unique(labels, return_inverse=True)

    return labels.view(h, w)

class SeDilationConv(nn.Module):
    """Multi-branch convolution block with per-pixel soft gating (SE-style).

    Five parallel branches — a 1x1 conv, a 3x3 conv, two dilated 3x3 convs
    (effective 5x5 and 7x7 receptive fields) and a dense 5x5 conv — all
    produce ``out_channels`` maps, which are blended per pixel with softmax
    weights predicted by a small bottleneck gate.

    Args:
        in_channels: channel count of the gate's input. The conv branches
            are lazy and infer their input channels on first forward.
        out_channels: channels produced by every branch and by the block.
        reduction: bottleneck ratio of the gate; defaults to
            ``floor(sqrt(in_channels))``.
    """

    def __init__(self, in_channels, out_channels, reduction=None):
        super().__init__()

        # ========== define Conv blocks ==========
        self.num_branchs = 5
        self.conv1x1 = nn.LazyConv2d(
            out_channels=out_channels,
            kernel_size=1,
        )
        self.conv3x3 = nn.LazyConv2d(
            out_channels=out_channels,
            kernel_size=3,
            padding=1,
        )
        # 3x3 kernel with dilation 2 -> effective 5x5 receptive field
        self.conv5x5 = nn.LazyConv2d(
            out_channels=out_channels,
            kernel_size=3,
            dilation=2,
            padding=2,
        )
        # 3x3 kernel with dilation 3 -> effective 7x7 receptive field
        self.conv7x7 = nn.LazyConv2d(
            out_channels=out_channels,
            kernel_size=3,
            dilation=3,
            padding=3,
        )
        self.dense5x5 = nn.LazyConv2d(
            out_channels=out_channels,
            kernel_size=5,
            padding=2,
        )

        # ========== define Gate block ==========
        if reduction is None:
            reduction = math.floor(math.sqrt(in_channels))
        # Clamp the bottleneck so it never collapses to zero channels
        # (e.g. when a caller passes reduction > in_channels).
        hidden = max(1, in_channels // reduction)
        self.gate = nn.Sequential(
            nn.Conv2d(in_channels, hidden, 1),
            nn.ReLU(inplace=True),
            nn.Conv2d(hidden, self.num_branchs, 1),
        )

    def forward(self, x):
        """Blend the five branch outputs with per-pixel gate weights.

        Args:
            x: (B, in_channels, H, W) input.

        Returns:
            (B, out_channels, H, W) gated combination of the branches.
        """
        # ========== get outputs =========
        outs = torch.stack(
            [self.conv1x1(x), self.conv3x3(x), self.conv5x5(x),
             self.conv7x7(x), self.dense5x5(x)],
            dim=1,
        )  # (B, num_branchs, C_out, H, W)

        # ========== get gate weights =========
        weights = F.softmax(self.gate(x), dim=1)  # (B, num_branchs, H, W)

        # ========== weighted sum over the branch dimension ==========
        return (outs * weights.unsqueeze(2)).sum(dim=1)
    
class GnnWithAttn(nn.Module):
    """Two stacked SeDilationConv blocks plus a superpixel search helper.

    Args:
        in_channels: input channels of the first conv block.
        out_channels: channels produced by both conv blocks.
        num_super_pixel: target region count for superpixel aggregation.
    """

    def __init__(self, in_channels, out_channels, num_super_pixel):
        super().__init__()
        self.gnn = nn.Sequential(
            SeDilationConv(in_channels=in_channels, out_channels=out_channels),
            SeDilationConv(in_channels=out_channels, out_channels=out_channels),
        )
        self.num_super_pixel = num_super_pixel

    def SuperPixelAggregation(self, x, num):
        """Binary-search a distance threshold so segment_image yields
        approximately ``num`` regions, and return that segmentation.

        A larger threshold connects more neighbouring pixels and therefore
        produces fewer regions, so the search raises the threshold when
        there are too many regions and lowers it when there are too few.

        Args:
            x: (1, C, H, W) embedding tensor (segment_image needs batch 1).
            num: desired number of regions.

        Returns:
            (H, W) LongTensor of region labels from the final threshold.
        """
        lo, hi = 0.0, 10.0
        segmented = segment_image(embedding=x, threshold=(lo + hi) / 2)
        # Stop once the search interval is narrower than 1e-1.
        while hi - lo >= 1e-1:
            mid = (lo + hi) / 2
            segmented = segment_image(embedding=x, threshold=mid)
            regions = segmented.unique().numel()
            if regions == num:
                break
            if regions > num:
                lo = mid  # too many regions -> merge more aggressively
            else:
                hi = mid  # too few regions -> split more finely
        return segmented

    def forward(self, x):
        """Apply the two-stage conv stack to ``x``."""
        return self.gnn(x)
    
if __name__ == '__main__':
    # Smoke test: run a random batch through the block and show the shape.
    sample = torch.rand([8, 3, 224, 224])
    net = SeDilationConv(in_channels=3, out_channels=8)
    out = net(sample)
    print(out.shape)