import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import cm
import time
import math

# -------- Segment Logic -------- #
def segment_image(embedding, threshold):
    """Segment an embedding map into connected components.

    Two 8-neighboring pixels are merged when the Euclidean distance between
    their embedding vectors is below `threshold`; the resulting connected
    components are relabeled with consecutive integer ids.

    Args:
        embedding: tensor of shape (1, C, h, w); only batch size 1 is supported.
        threshold: merge-distance threshold between neighboring pixel embeddings.

    Returns:
        (h, w) int64 tensor of component labels in [0, num_components).
    """
    b, c, h, w = embedding.shape
    assert b == 1, "Only batch size 1 supported"
    device = embedding.device
    embedding = embedding[0]  # (c, h, w)

    pixels = embedding.view(c, -1).T  # (N, C)
    N = h * w

    # Build 8-neighborhood edges. For a shift (dy, dx), the pixels whose
    # shifted neighbor is in-bounds form the Cartesian product of the valid
    # rows and the valid columns, so broadcasting idx over both builds all
    # (source, destination) pairs for that direction at once.
    idx = torch.arange(N, device=device).reshape(h, w)
    neighbors = []
    directions = [(-1, -1), (-1, 0), (-1, 1),
                  (0, -1),          (0, 1),
                  (1, -1),  (1, 0), (1, 1)]

    for dy, dx in directions:
        y1 = torch.arange(h, device=device)
        x1 = torch.arange(w, device=device)
        y2 = y1 + dy
        x2 = x1 + dx

        valid_y = (y2 >= 0) & (y2 < h)
        valid_x = (x2 >= 0) & (x2 < w)
        if not valid_y.any() or not valid_x.any():
            continue

        y1, y2 = y1[valid_y], y2[valid_y]
        x1, x2 = x1[valid_x], x2[valid_x]

        src = idx[y1[:, None], x1]
        dst = idx[y2[:, None], x2]
        pairs = torch.stack([src.reshape(-1), dst.reshape(-1)], dim=1)
        neighbors.append(pairs)

    neighbors = torch.cat(neighbors, dim=0)

    vec1 = pixels[neighbors[:, 0]]
    vec2 = pixels[neighbors[:, 1]]
    dist = torch.norm(vec1 - vec2, dim=1)

    mask = dist < threshold
    connected = neighbors[mask]

    # Connected components via iterative min-label propagation.
    # BUG FIX: the previous version ran a fixed budget (5 union passes +
    # 3 path-compression passes), which fails to merge components whose
    # diameter exceeds that budget, and used plain index assignment with
    # duplicate indices (nondeterministic last-write-wins, which can replace
    # a smaller label with a larger one). We now iterate to a fixed point and
    # use scatter_reduce with 'amin', which handles duplicates correctly.
    parent = torch.arange(N, device=device)

    def _propagate_min(pairs, parent):
        # Repeat until stable: each edge endpoint takes the minimum label of
        # the pair, then one pointer-jumping step (parent = parent[parent])
        # accelerates convergence. Labels are non-increasing and bounded
        # below, so the loop terminates; at the fixed point every edge joins
        # equal labels, i.e. each component carries a single label.
        while True:
            prev = parent
            m = torch.minimum(parent[pairs[:, 0]], parent[pairs[:, 1]])
            parent = parent.clone()
            parent.scatter_reduce_(0, pairs[:, 0], m, reduce="amin")
            parent.scatter_reduce_(0, pairs[:, 1], m, reduce="amin")
            parent = parent[parent]
            if torch.equal(parent, prev):
                return parent

    parent = _propagate_min(connected, parent)
    # Relabel the (sparse) component representatives to consecutive ids 0..K-1.
    _, labels = torch.unique(parent, return_inverse=True)

    return labels.view(h, w)  # (h, w)

# -------- Visualization -------- #
def visualize_labels(labels):
    """Render an integer label map with a qualitative colormap.

    Saves the figure to 'fig.png' and then displays it.

    Args:
        labels: (h, w) integer label tensor (any device; detached here).
    """
    lbl = labels.detach().cpu().numpy()
    # BUG FIX: guard against a single-label image, where lbl.max() == 0 would
    # divide by zero.
    denom = max(int(lbl.max()), 1)
    norm_lbl = lbl.astype(np.float32) / denom
    colored = cm.tab20(norm_lbl)
    plt.imshow(colored)
    plt.axis('off')
    plt.title("Segmented Labels")
    # BUG FIX: save BEFORE show -- plt.show() closes the figure, so saving
    # afterwards wrote out a blank image.
    plt.savefig('fig.png')
    plt.show()

# -------- Core logic: guided blending -------- #
def segment_with_teacher(image, embedding, threshold, blend_ratio=0.8):
    """Blend the raw image with a network embedding, then segment the blend.

    image: input image (1, 3, H, W)
    embedding: network output (1, C, h, w)
    threshold: merge-distance threshold forwarded to segment_image
    blend_ratio: weight in [0, 1]; fraction contributed by the resized image
        (1 - blend_ratio is contributed by the embedding)
    return: (h, w) integer label map from segment_image
    """
    # Generalized: read the channel count from the embedding instead of
    # hard-coding 64, so any embedding width C works (identical for C == 64).
    _, c, h, w = embedding.shape
    img_channels = image.shape[1]

    # Resize the image to the embedding's spatial size.
    image_resized = F.interpolate(image, size=(h, w), mode='bilinear', align_corners=False)

    # Tile the image channels up to at least C, then trim to exactly C so the
    # blend below is shape-compatible with the embedding.
    repeats = math.ceil(c / img_channels)
    image_tiled = image_resized.repeat(1, repeats, 1, 1)[:, :c, :, :]

    # Convex combination of the tiled image and the embedding.
    blended_input = blend_ratio * image_tiled + (1 - blend_ratio) * embedding

    # Segment the blended feature map.
    labels = segment_image(blended_input, threshold)  # (h, w)
    return labels
    

if __name__ == "__main__":
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # 原图 & 网络输出
    image = torch.load(r'image_tensor.pt').unsqueeze(0)  # 假设已经有保存的 image
    image = image.to(device)

    embedding = torch.randn(1, 64, 256, 256, device=device)  # 假设你的 embedding 是这个形状
    embedding = embedding / (embedding.norm(dim=1, keepdim=True) + 1e-6)

    threshold = 2 - 1e-18
    start = time.time()

    # 引导融合
    guided_result = segment_with_teacher(image, embedding, threshold, blend_ratio=1)
    naive_result=segment_image(image, threshold=1)
    end = time.time()
    print("Time cost:", end - start)
    print("Guided result shape:", naive_result.shape)
    print(naive_result)
