import torch
import torch.nn as nn
import torch.nn.functional as F
from config import options
import numpy as np


def squash(input_tensor, dim=-1, epsilon=1e-7):
    """Capsule squashing non-linearity (Sabour et al., 2017).

    Shrinks each capsule vector's norm into [0, 1) while keeping its
    direction: short vectors collapse toward zero, long ones approach
    unit length.

    Args:
        input_tensor: capsule vectors; squashed along `dim`.
        dim: dimension holding the capsule components (default: last).
        epsilon: keeps the division finite when the norm is exactly zero.
    """
    norm_sq = input_tensor.pow(2).sum(dim=dim, keepdim=True)
    direction = input_tensor / torch.sqrt(norm_sq + epsilon)
    return (norm_sq / (1 + norm_sq)) * direction


class SimNet(nn.Module):
    """MLP that maps a digit-capsule vector to a flattened image in [0, 1].

    Mirrors the reconstruction decoder in CapsuleNet; the input width comes
    from the global ``options`` and the hidden/output sizes from ``args``.
    """

    def __init__(self, args):
        super(SimNet, self).__init__()
        self.args = args

        # digit_cap_dim -> h1 -> h2 -> img_h*img_w, sigmoid-squashed
        self.sim_net = nn.Sequential(
            nn.Linear(options.digit_cap_dim, args.h1),
            nn.ReLU(inplace=True),
            nn.Linear(args.h1, args.h2),
            nn.ReLU(inplace=True),
            nn.Linear(args.h2, args.img_h * args.img_w),
            nn.Sigmoid(),
        )

    def forward(self, imgs):
        # BUG FIX: the original called self.sim_net() with no argument and
        # ignored `imgs`, which raises a TypeError at the first Linear layer.
        return self.sim_net(imgs)


class PrimaryCapsLayer(nn.Module):
    """First capsule layer: a plain convolution whose output channels are
    regrouped into `num_cap_map` maps of `cap_dim`-dimensional capsules.

    No dynamic routing happens here; the only capsule-specific operation
    is the final squash.
    """

    def __init__(
        self, in_channels, out_channels, kernel_size, stride, cap_dim, num_cap_map
    ):
        super(PrimaryCapsLayer, self).__init__()

        self.capsule_dim = cap_dim      # components per capsule (e.g. 8)
        self.num_cap_map = num_cap_map  # number of capsule maps (e.g. 32)
        self.conv_out = nn.Conv2d(
            in_channels, out_channels, kernel_size, stride, padding=0
        )

    def forward(self, x):
        """Convolve, regroup channels into capsules, then squash.

        The conv's out_channels must equal capsule_dim * num_cap_map.
        e.g. [256, 256, 20, 20] -> conv(k=9, s=2) -> [256, 256, 6, 6]
        -> regroup -> [256, 32, 36, 8].

        Returns:
            [bsz, num_cap_map, H*W, capsule_dim] — intuitively
            [batch, #capsule maps, spatial positions, capsule depth].
        """
        bsz = x.size(0)
        feat = self.conv_out(x)
        # split channels into (capsule_dim, num_cap_map), flatten the
        # spatial grid, then move capsule_dim to the last axis
        feat = feat.view(bsz, self.capsule_dim, self.num_cap_map, -1)
        feat = feat.permute(0, 2, 3, 1)  # same as transpose(1,2).transpose(2,3)
        return squash(feat)


class DigitCapsLayer(nn.Module):
    """Digit-capsule layer with dynamic routing-by-agreement.

    Each of the `num_digit_cap` output capsules receives a prediction
    vector u_hat from every primary capsule (via the learned transform W).
    Routing iteratively increases the coupling of primary capsules whose
    predictions agree with the current output capsule.
    """

    def __init__(
        self,
        num_digit_cap,
        num_prim_cap,
        num_prim_map,
        in_cap_dim,
        out_cap_dim,
        num_iterations,
    ):
        super(DigitCapsLayer, self).__init__()
        self.num_prim_cap = num_prim_cap
        self.num_prim_map = num_prim_map
        self.num_digit_cap = num_digit_cap  # e.g. 10 output capsules
        self.num_iterations = num_iterations
        self.out_cap_dim = out_cap_dim

        if options.share_weight:
            # One transform shared across the spatial positions of each map:
            # [1, num_digit_cap, num_prim_map, 1, out_cap_dim, in_cap_dim]
            # e.g. [1, 10, 32, 1, 16, 8]
            self.W = nn.Parameter(
                0.01
                * torch.randn(1, num_digit_cap, num_prim_map, 1, out_cap_dim, in_cap_dim)
            )
        else:
            # A separate transform per spatial position:
            # e.g. [1, 10, 32, 36, 16, 8]
            self.W = nn.Parameter(
                0.01
                * torch.randn(
                    1,
                    num_digit_cap,
                    num_prim_map,
                    num_prim_cap,
                    out_cap_dim,
                    in_cap_dim,
                )
            )

    def forward(self, x):
        """Dynamic routing.

        The image is already represented by primary capsules shaped
        [bsz, num_prim_map, H*W, in_cap_dim]. Each iteration:
            1. c = softmax(b) over the output-capsule axis.
            2. s = sum of c-weighted predictions over all primary capsules;
               v = squash(s) -> [bsz, num_digit_cap, out_cap_dim].
            3. b += <u_hat, v>: agreement between predictions and output.
        Intermediate iterations use a detached u_hat so gradients only flow
        through the final pass.

        Returns:
            v: [bsz, num_digit_cap, out_cap_dim], e.g. [256, 10, 16].
        """
        batch_size = x.size(0)  # x: [256, 32, 36, 8]
        u = x[:, None, :, :, :, None]  # [256, 1, 32, 36, 8, 1]

        # Linear transform on the last two dims (broadcast over the rest):
        # W [1, 10, 32, 1, 16, 8] @ u [256, 1, 32, 36, 8, 1]
        # -> u_hat [256, 10, 32, 36, 16]
        u_hat = torch.matmul(self.W, u).squeeze(-1)

        # detach u_hat during routing iterations to prevent gradients from
        # flowing through the intermediate passes
        temp_u_hat = u_hat.detach()  # [256, 10, 32, 36, 16]

        # routing logits; FIX: allocate on the input's device/dtype instead
        # of hard-coding .cuda(), so the layer also runs on CPU.
        b = torch.zeros(
            batch_size, self.num_digit_cap, u_hat.size(2), u_hat.size(3), 1,
            device=x.device, dtype=x.dtype,
        )  # [256, 10, 32, 36, 1]

        for _ in range(self.num_iterations - 1):
            c = F.softmax(b, dim=1)
            s = (
                (c * temp_u_hat).sum(dim=2).sum(dim=2)
            )  # [256, 10, 16]: sums out the map axis, then the spatial axis
            v = squash(s)  # [256, 10, 16]
            # agreement: [256, 10, 1152, 16] @ [256, 10, 16, 1]
            uv = torch.matmul(
                temp_u_hat.view(batch_size, self.num_digit_cap, -1, self.out_cap_dim),
                v.unsqueeze(-1),
            )  # [256, 10, 1152, 1]
            b += uv.view(b.shape)  # [256, 10, 32, 36, 1]
        # Final pass, with gradients. FIX: the original used softmax over
        # dim=3 here while every routing iteration used dim=1; use dim=1
        # consistently (coupling coefficients compete across output capsules).
        c = F.softmax(b, dim=1)
        s = (c * u_hat).sum(dim=2).sum(dim=2)
        v = squash(s)
        return v  # [256, 10, 16]


class CapsuleNet(nn.Module):
    """CapsNet classifier (Sabour et al., 2017).

    Pipeline: conv stem -> primary capsules -> digit capsules with dynamic
    routing -> class scores as capsule lengths, plus an optional decoder
    that reconstructs the input from the selected digit capsule.
    """

    def __init__(self, args):
        super(CapsuleNet, self).__init__()
        self.args = args

        # convolution stem
        self.conv1 = nn.Conv2d(
            in_channels=args.img_c,  # e.g. 1
            out_channels=args.f_conv1,  # e.g. 256
            kernel_size=args.k_conv1,  # e.g. 9
            stride=args.s_conv1,  # e.g. 1
        )

        # primary capsule layer: the f_prim conv filters are regrouped into
        # num_prim_map capsule maps of primary_cap_dim components each
        assert args.f_prim % args.primary_cap_dim == 0
        self.num_prim_map = int(args.f_prim / args.primary_cap_dim)
        self.primary_capsules = PrimaryCapsLayer(
            in_channels=args.f_conv1,  # 256
            out_channels=args.f_prim,  # 256
            kernel_size=args.k_prim,  # 9
            stride=args.s_prim,  # 2
            cap_dim=args.primary_cap_dim,  # 8
            num_cap_map=self.num_prim_map,  # 32
        )
        # capsules per map = spatial positions after both convs.
        # NOTE(review): this formula matches conv arithmetic only for the
        # default MNIST config (28x28, k=9/9, s=1/2 -> 6*6 = 36); it is not
        # a general output-size computation — confirm before changing
        # img_h / k_prim / s_prim.
        num_prim_cap = int(
            (args.img_h - 2 * (args.k_prim - 1))
            * (args.img_h - 2 * (args.k_prim - 1))
            / (args.s_prim * args.s_prim)
        )

        self.digit_capsules = DigitCapsLayer(
            num_digit_cap=args.num_classes,  # 10
            num_prim_cap=num_prim_cap,  # 36
            num_prim_map=self.num_prim_map,  # 32
            in_cap_dim=args.primary_cap_dim,  # 8
            out_cap_dim=args.digit_cap_dim,  # 16
            num_iterations=args.num_iterations,  # 3
        )

        if args.add_decoder:
            # reconstruction decoder: masked capsules -> flattened image
            self.decoder = nn.Sequential(
                nn.Linear(16 * args.num_classes, args.h1),  # (16*10, 512)
                nn.ReLU(inplace=True),
                nn.Linear(args.h1, args.h2),  # (512, 1024)
                nn.ReLU(inplace=True),
                nn.Linear(args.h2, args.img_h * args.img_w),  # (1024, 28*28)
                nn.Sigmoid(),
            )

    def forward(self, imgs, y=None):
        """Classify `imgs`; optionally reconstruct them through the decoder.

        Args:
            imgs: input batch, e.g. [bsz, 1, 28, 28].
            y: one-hot labels [bsz, num_classes]; when None (inference) the
               predicted class masks the capsules fed to the decoder.

        Returns:
            (one-hot predictions [bsz, num_classes],
             reconstruction — [bsz, img_h*img_w] from the decoder, or a
             zero tensor shaped like `imgs` when the decoder is disabled,
             capsule lengths [bsz, num_classes]).
        """
        # imgs: [256, 1, 28, 28]
        x = F.relu(self.conv1(imgs), inplace=True)  # [256, 256, 20, 20]
        x = self.primary_capsules(x)  # [bsz, n_maps, H*W, cap_dim]
        x = self.digit_capsules(x)  # dynamic routing -> [256, 10, 16]

        # class score = L2 norm of each digit capsule
        v_length = (x ** 2).sum(dim=-1) ** 0.5

        _, y_pred = v_length.max(dim=1)
        y_pred_ohe = F.one_hot(y_pred, self.args.num_classes)

        if y is None:
            y = y_pred_ohe

        img_reconst = torch.zeros_like(imgs)
        if self.args.add_decoder:
            # zero out all but the selected capsule, then decode
            img_reconst = self.decoder((x * y[:, :, None].float()).view(x.size(0), -1))

        return y_pred_ohe, img_reconst, v_length


class CapsuleLoss(nn.Module):
    """Margin loss over capsule lengths, plus an optional scaled MSE
    reconstruction term (CapsNet training objective)."""

    def __init__(self, args):
        super(CapsuleLoss, self).__init__()
        self.args = args

    def forward(self, images, labels, v_c, reconstructions):
        """Compute margin loss (+ alpha * reconstruction MSE if enabled).

        Args:
            images: original inputs (only used when add_decoder is set).
            labels: one-hot targets [bsz, num_classes].
            v_c: capsule lengths [bsz, num_classes].
            reconstructions: decoder output, flattened per sample.
        """
        args = self.args
        labels_f = labels.float()

        # max(0, m_plus - ||v_c||)^2 — present classes should be long
        present = F.relu(args.m_plus - v_c, inplace=True) ** 2
        # max(0, ||v_c|| - m_minus)^2 — absent classes should be short
        absent = F.relu(v_c - args.m_minus, inplace=True) ** 2

        per_class = labels_f * present + args.lambda_val * (1.0 - labels_f) * absent
        margin_loss = per_class.sum(dim=1).mean()

        if not args.add_decoder:
            return margin_loss

        assert torch.numel(images) == torch.numel(reconstructions)
        flat = images.view(reconstructions.size()[0], -1)
        reconstruction_loss = torch.mean((reconstructions - flat) ** 2)
        return margin_loss + args.alpha * reconstruction_loss
