import torch
import torch.nn as nn
import torch.nn.functional as F

class GlobalPiCANet(nn.Module):
    """Global pixel-wise contextual attention (PiCANet-style).

    Every spatial position attends over all positions of the feature map
    via scaled dot-product attention; the attended features are projected
    back to ``in_channels`` and added to the input (residual connection).

    Args:
        in_channels: number of channels of the input feature map.
        inter_channels: dimensionality ``d`` of the attention space.
    """

    def __init__(self, in_channels, inter_channels=64):
        super(GlobalPiCANet, self).__init__()
        # 1x1 projections into the lower-dimensional attention space.
        self.query_conv = nn.Conv2d(in_channels, inter_channels, kernel_size=1)
        self.key_conv   = nn.Conv2d(in_channels, inter_channels, kernel_size=1)
        self.value_conv = nn.Conv2d(in_channels, inter_channels, kernel_size=1)

        self.out_conv   = nn.Conv2d(inter_channels, in_channels, kernel_size=1)

    def forward(self, x):
        # x: input feature map of shape (B, C, H, W)
        B, C, H, W = x.size()
        d = self.query_conv.out_channels  # inter_channels

        query = self.query_conv(x)  # (B, d, H, W)
        key   = self.key_conv(x)    # (B, d, H, W)
        value = self.value_conv(x)  # (B, d, H, W)

        query = query.view(B, -1, H * W)       # (B, d, N)
        key   = key.view(B, -1, H * W)         # (B, d, N)
        value = value.view(B, -1, H * W)       # (B, d, N)

        # Scaled dot-product scores between every pair of positions.
        # FIX: the 1/sqrt(d) scaling was missing here although LocalPiCANet
        # applies it; without it the softmax temperature grows with the
        # attention dimensionality (standard scaled dot-product attention).
        attn = torch.bmm(query.permute(0, 2, 1), key) / (d ** 0.5)  # (B, N, N)
        attn = F.softmax(attn, dim=-1)  # row i sums to 1 over source positions

        # out[:, :, i] = sum_j attn[i, j] * value[:, :, j]
        out = torch.bmm(value, attn.permute(0, 2, 1))  # (B, d, N)

        out = out.view(B, -1, H, W)  # (B, d, H, W)
        out = self.out_conv(out)     # (B, C, H, W)
        out = out + x                # residual connection
        return out

class LocalPiCANet(nn.Module):
    """Local pixel-wise contextual attention (PiCANet-style).

    Each spatial position attends only over the ``ksize x ksize``
    neighborhood centered on itself, using scaled dot-product attention
    over unfolded patches.

    Args:
        in_channels: number of channels of the input feature map.
        inter_channels: dimensionality ``d`` of the attention space.
        ksize: side length of the square neighborhood; must be odd so the
            window is centered on the query pixel.
    """

    def __init__(self, in_channels, inter_channels=64, ksize=7):
        super(LocalPiCANet, self).__init__()
        assert ksize % 2 == 1, "ksize must be odd for center alignment"

        # 1x1 projections into the attention space.
        self.query_conv = nn.Conv2d(in_channels, inter_channels, kernel_size=1)
        self.key_conv   = nn.Conv2d(in_channels, inter_channels, kernel_size=1)
        self.value_conv = nn.Conv2d(in_channels, inter_channels, kernel_size=1)
        self.out_conv   = nn.Conv2d(inter_channels, in_channels, kernel_size=1)

        self.ksize = ksize
        self.pad = ksize // 2
        # Extracts the K*K patch around every pixel (zero-padded at borders).
        self.unfold = nn.Unfold(kernel_size=ksize, padding=self.pad)

    def forward(self, x):
        batch, _, height, width = x.shape
        n_pix = height * width
        kk = self.ksize * self.ksize
        dim = self.query_conv.out_channels  # inter_channels

        q = self.query_conv(x)  # (B, d, H, W)
        k = self.key_conv(x)
        v = self.value_conv(x)

        # One query vector per pixel, broadcastable over its patch:
        # (B, N, 1, d).
        q = q.view(batch, dim, n_pix).transpose(1, 2).unsqueeze(2)

        # Local neighborhoods of keys / values, one patch per pixel:
        # Unfold yields (B, d*K*K, N) in channel-major order, so the view
        # below splits it into (B, d, K*K, N) before moving to
        # (B, N, K*K, d).
        k_patches = self.unfold(k).view(batch, dim, kk, n_pix).permute(0, 3, 2, 1)
        v_patches = self.unfold(v).view(batch, dim, kk, n_pix).permute(0, 3, 2, 1)

        # Scaled dot-product scores within each neighborhood: (B, N, K*K).
        scores = torch.sum(q * k_patches, dim=-1) / (dim ** 0.5)
        weights = F.softmax(scores, dim=-1)

        # Attention-weighted sum of the local values:
        # (B, N, d) -> (B, d, H, W).
        attended = torch.sum(weights.unsqueeze(-1) * v_patches, dim=2)
        attended = attended.transpose(1, 2).view(batch, dim, height, width)

        return self.out_conv(attended)  # (B, C, H, W); no residual here

# class MultiScalePiCANet(nn.Module):
#     def __init__(self, in_channels, inter_channels=64, local_ksize=5, mid_ksize=13):
#         super(MultiScalePiCANet, self).__init__()
        
#         # 三个分支分别为不同感受野的上下文注意
#         self.local = LocalPiCANet(in_channels, inter_channels, ksize=local_ksize)
#         self.mid = LocalPiCANet(in_channels, inter_channels, ksize=mid_ksize)
#         self.global_ = GlobalPiCANet(in_channels, inter_channels)

#         # 为每个像素生成 scale 选择权重 (3个分支)
#         self.scale_selector = nn.Sequential(
#             nn.Conv2d(in_channels, inter_channels, kernel_size=1),
#             nn.ReLU(inplace=True),
#             nn.Conv2d(inter_channels, 3, kernel_size=1)  # 输出3个尺度权重
#         )
    
#     def forward(self, x):
#         # 三个尺度的上下文增强输出
#         local_feat = self.local(x)     # (B, C, H, W)
#         mid_feat   = self.mid(x)
#         global_feat = self.global_(x)

#         # 堆叠成 (B, 3, C, H, W)
#         features = torch.stack([local_feat, mid_feat, global_feat], dim=1)  # shape: (B, 3, C, H, W)

#         # 计算每个像素的尺度注意权重 β（softmax 归一化）
#         scale_logits = self.scale_selector(x)  # (B, 3, H, W)
#         scale_weights = F.softmax(scale_logits, dim=1)  # (B, 3, H, W)

#         # 加权融合三个尺度
#         scale_weights = scale_weights.unsqueeze(2)  # (B, 3, 1, H, W)
#         out = (features * scale_weights).sum(dim=1)  # (B, C, H, W)
        
#         if self.training:
#             with torch.no_grad():
#                 avg_weights = scale_weights.mean(dim=[0, 3, 4])
#                 log_str = f"[Scale Weights Avg] Local: {avg_weights[0].item():.3f}, Mid: {avg_weights[1].item():.3f}, Global: {avg_weights[2].item():.3f}\n"
#                 with open("scale_weights_log.txt", "a") as f:
#                     f.write(log_str)
#         return out

class MultiScalePiCANet(nn.Module):
    """Fuses local / mid-range / global PiCANet branches with per-pixel,
    learned scale-selection weights.

    ``forward`` returns a tuple ``(out, scale_weights)`` where ``out`` is
    the fused feature map (B, C, H, W) and ``scale_weights`` the detached
    softmax weights (B, num_branches, H, W).

    Args:
        in_channels: channels of the input feature map.
        inter_channels: attention dimensionality passed to each branch.
        local_ksize: neighborhood size of the local branch.
        mid_ksize: neighborhood size of the mid-range branch.
        use_local / use_mid / use_global: enable the respective branch;
            at least one must be True.

    Raises:
        ValueError: if all three branches are disabled.
    """

    def __init__(self, in_channels, inter_channels=64, local_ksize=5, mid_ksize=13,
                 use_local=True, use_mid=True, use_global=True):
        super(MultiScalePiCANet, self).__init__()

        # FIX: previously a zero-branch configuration was silently accepted
        # and only crashed later (0-channel selector conv / empty
        # torch.stack in forward) with an opaque error.
        if not (use_local or use_mid or use_global):
            raise ValueError(
                "MultiScalePiCANet requires at least one enabled branch "
                "(use_local / use_mid / use_global)"
            )

        self.use_local = use_local
        self.use_mid = use_mid
        self.use_global = use_global

        # Names of the active branches, in the order their features are
        # stacked in forward (keep in sync with the forward ordering).
        self.branches = []

        if use_local:
            self.local = LocalPiCANet(in_channels, inter_channels, ksize=local_ksize)
            self.branches.append("local")
        if use_mid:
            self.mid = LocalPiCANet(in_channels, inter_channels, ksize=mid_ksize)
            self.branches.append("mid")
        if use_global:
            self.global_ = GlobalPiCANet(in_channels, inter_channels)
            self.branches.append("global")

        self.num_branches = len(self.branches)

        # Per-pixel logits for choosing among the active branches.
        self.scale_selector = nn.Sequential(
            nn.Conv2d(in_channels, inter_channels, kernel_size=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(inter_channels, self.num_branches, kernel_size=1)  # N branch weights
        )

    def forward(self, x):
        feature_list = []

        # Context-enhanced features from each enabled branch.
        if self.use_local:
            feature_list.append(self.local(x))
        if self.use_mid:
            feature_list.append(self.mid(x))
        if self.use_global:
            feature_list.append(self.global_(x))

        features = torch.stack(feature_list, dim=1)  # (B, N, C, H, W)

        # Per-pixel scale-attention weights, softmax-normalized over branches.
        scale_logits = self.scale_selector(x)            # (B, N, H, W)
        scale_weights = F.softmax(scale_logits, dim=1)   # (B, N, H, W)
        scale_weights_exp = scale_weights.unsqueeze(2)   # (B, N, 1, H, W)

        # Weighted fusion of the branch features.
        out = (features * scale_weights_exp).sum(dim=1)  # (B, C, H, W)

        if self.training:
            # NOTE(review): side effect — every training-mode forward pass
            # appends the batch-averaged branch weights to a hard-coded log
            # file in the working directory; consider routing through
            # `logging` instead.
            with torch.no_grad():
                avg_weights = scale_weights.mean(dim=[0, 2, 3])  # (N,)
                log_str = f"[Scale Weights Avg] " + ", ".join(
                    [f"{name.capitalize()}: {avg_weights[i].item():.3f}" for i, name in enumerate(self.branches)]
                ) + "\n"
                with open("scale_weights_log.txt", "a") as f:
                    f.write(log_str)

        return out, scale_weights.detach()











