import torch
import torch.nn as nn
import torch.nn.functional as F


class L2Pool2d(nn.Module):
    """L2 (Euclidean-norm) pooling layer.

    Thin wrapper around ``nn.LPPool2d`` with ``norm_type=2``: every pooling
    window is reduced to the square root of the sum of its squared entries.
    """

    def __init__(self, kernel_size, stride):
        super().__init__()
        # p=2 turns the generic Lp pool into an L2 pool.
        self.pool = nn.LPPool2d(2, kernel_size=kernel_size, stride=stride)

    def forward(self, x):
        pooled = self.pool(x)
        return pooled


class DeepDescriptor(nn.Module):
    """CNN mapping a 64x64 single-channel patch to a 128-d descriptor.

    Three conv -> L2-pool -> tanh stages; the first two are followed by a
    subtractive local-mean normalization (input minus a fixed Gaussian blur
    of itself):

        Layer 1: 1x64x64  -> conv7 -> 32x58x58 -> L2 pool /2 -> 32x29x29
        Layer 2: 32x29x29 -> conv6 -> 64x24x24 -> L2 pool /3 -> 64x8x8
        Layer 3: 64x8x8   -> conv5 -> 128x4x4  -> L2 pool /4 -> 128x1x1
    """

    def __init__(self):
        super().__init__()
        # Layer 1: input 64x64 -> output 29x29x32
        self.conv1 = nn.Conv2d(1, 32, kernel_size=7, stride=1, padding=0)
        self.pool1 = L2Pool2d(kernel_size=2, stride=2)
        self.tanh1 = nn.Tanh()
        self.norm1 = self._create_subtractive_norm(32)

        # Layer 2: input 29x29x32 -> output 8x8x64
        self.conv2 = nn.Conv2d(32, 64, kernel_size=6, stride=1, padding=0)
        self.pool2 = L2Pool2d(kernel_size=3, stride=3)
        self.tanh2 = nn.Tanh()
        self.norm2 = self._create_subtractive_norm(64)

        # Layer 3: input 8x8x64 -> output 1x1x128
        self.conv3 = nn.Conv2d(64, 128, kernel_size=5, stride=1, padding=0)
        self.pool3 = L2Pool2d(kernel_size=4, stride=4)
        self.tanh3 = nn.Tanh()

    def _create_subtractive_norm(self, in_channels):
        """Build a frozen depthwise 5x5 Gaussian-blur conv.

        ``forward`` subtracts this blur's output from its input, i.e. it
        removes a local (per-channel) weighted mean. ``groups=in_channels``
        makes the conv depthwise so channels are blurred independently.
        """
        conv = nn.Conv2d(in_channels, in_channels, kernel_size=5, padding=2,
                         groups=in_channels, bias=False)
        # One (1, 1, 5, 5) Gaussian kernel, replicated for every channel.
        kernel = self._gaussian_kernel(5, sigma=1.0).repeat(in_channels, 1, 1, 1)
        with torch.no_grad():
            # copy_ instead of the discouraged `.data =` assignment.
            conv.weight.copy_(kernel)
        conv.weight.requires_grad_(False)  # fixed blur, never trained
        return conv

    @staticmethod
    def _gaussian_kernel(size=5, sigma=1.0):
        """Return a normalized 2-D Gaussian kernel of shape (1, 1, size, size)."""
        coords = torch.arange(size, dtype=torch.float32)
        # Explicit `indexing` avoids the PyTorch >=1.10 meshgrid deprecation
        # warning; for this symmetric grid 'ij' and 'xy' give the same kernel.
        y, x = torch.meshgrid(coords, coords, indexing="ij")
        center = (size - 1) / 2
        kernel = torch.exp(-((x - center) ** 2 + (y - center) ** 2) / (2 * sigma ** 2))
        kernel = kernel / kernel.sum()  # weights sum to 1 -> blur is a local mean
        return kernel.unsqueeze(0).unsqueeze(0)  # shape (1, 1, size, size)

    def forward(self, x):
        # Layer 1: conv -> 58x58, pool -> 29x29
        x = self.conv1(x)
        x = self.pool1(x)
        x = self.tanh1(x)
        x = x - self.norm1(x)  # subtract local (Gaussian-weighted) mean

        # Layer 2: conv -> 24x24, pool -> 8x8
        x = self.conv2(x)
        x = self.pool2(x)
        x = self.tanh2(x)
        x = x - self.norm2(x)

        # Layer 3: conv -> 4x4, pool -> 1x1
        x = self.conv3(x)
        x = self.pool3(x)
        x = self.tanh3(x)

        return x.view(x.size(0), -1)  # flatten to a 128-d descriptor


class SiameseNetwork(nn.Module):
    """Siamese wrapper: embeds two inputs with one shared base network."""

    def __init__(self, base_cnn):
        super().__init__()
        # A single module instance -> both branches share all weights.
        self.base_cnn = base_cnn

    def forward(self, x1, x2):
        return self.base_cnn(x1), self.base_cnn(x2)