import torch
import torch.nn as nn
from torch.nn import functional as F
import math


# Margin for the triplet loss in CapsNet.MyLoss: a triplet contributes zero
# loss once the anchor-negative squared distance exceeds the anchor-positive
# squared distance by at least this much.
F_MARGIN = 0.2


class AgreementRouting(nn.Module):
    """Routing-by-agreement between two capsule layers.

    Holds a learnable prior logit matrix ``b`` of shape (ci_num, co_num)
    and refines per-sample routing logits for ``n_iter`` rounds using the
    agreement between candidate outputs and per-capsule predictions.
    Candidate outputs are passed through ReLU rather than a squash.
    """

    def __init__(self, ci_num, co_num, n_iter=3):
        super(AgreementRouting, self).__init__()
        self.ci_num = ci_num
        self.co_num = co_num
        self.n_iter = n_iter
        # Prior routing logits, shared across the batch.
        self.b = nn.Parameter(torch.zeros((self.ci_num, self.co_num)))

    def forward(self, x):
        # x: (batch, ci_num, co_num, co_len) prediction vectors.
        batch = x.size(0)
        # Initial coupling from the priors: softmax over output capsules.
        coupling = F.softmax(self.b, dim=1).unsqueeze(2)
        out = F.relu((coupling * x).sum(dim=1))
        logits = self.b.expand((batch, self.ci_num, self.co_num))
        for _ in range(self.n_iter):
            # Agreement = dot product of current outputs with predictions.
            agreement = (out.unsqueeze(1) * x).sum(dim=3)
            logits = logits + agreement
            coupling = F.softmax(logits, dim=2).unsqueeze(3)
            out = F.relu((coupling * x).sum(dim=1))
        return out


class CapsLayer(nn.Module):
    """Learned linear map from ci_num input capsules of length ci_len to
    per-input predictions for co_num output capsules of length co_len.

    forward: (batch, ci_num, ci_len) -> (batch, ci_num, co_num, co_len)
    """

    def __init__(self, ci_num, ci_len, co_num, co_len):
        super(CapsLayer, self).__init__()
        self.ci_num = ci_num
        self.ci_len = ci_len
        self.co_num = co_num
        self.co_len = co_len
        # One (ci_len x co_num*co_len) weight matrix per input capsule.
        self.w = nn.Parameter(torch.Tensor(
            self.ci_num, self.ci_len, self.co_num * self.co_len))
        self._reset_parameters()

    def _reset_parameters(self):
        # Uniform init with scale set by the number of input capsules.
        bound = 1.0 / math.sqrt(self.ci_num)
        self.w.data.uniform_(-bound, bound)

    def forward(self, x):
        # (batch, ci, 1, ci_len) @ (ci, ci_len, co*co_len) broadcasts over
        # the batch, giving one flat prediction row per input capsule.
        preds = x.unsqueeze(2).matmul(self.w)
        return preds.view(-1, self.ci_num, self.co_num, self.co_len)


class PrimaryCapsLayer(nn.Module):
    """Turn a conv feature map into a flat sequence of capsule vectors.

    The channel axis is read as co_chn groups of co_len-dimensional
    capsules; all spatial positions are flattened into the capsule axis,
    so (n, co_chn*co_len, h, w) -> (n, co_chn*h*w, co_len), ReLU-activated.
    """

    def __init__(self, co_chn, co_len):
        super(PrimaryCapsLayer, self).__init__()
        self.co_chn = co_chn
        self.co_len = co_len

    def forward(self, x):
        n, _, h, w = x.size()
        # Split channels into (group, vector) and move the vector axis last.
        caps = x.view(n, self.co_chn, self.co_len, h, w)
        caps = caps.permute(0, 1, 3, 4, 2).contiguous()
        return F.relu(caps.view(n, -1, self.co_len))


class CapsNet(nn.Module):
    """Capsule network embedding n*3*112*112 images into L2-normalized
    128-d vectors, trained with a triplet margin loss (MyLoss).

    ``f_thres`` is an exponential moving average of the midpoint between
    the mean positive and mean negative squared distances seen in MyLoss;
    MyCor uses it as the same/different decision threshold.
    """

    def __init__(self):
        super(CapsNet, self).__init__()
        # n*3*112*112
        self.L1 = nn.Sequential(nn.Conv2d(3, 128, kernel_size=9, stride=2, padding=4),
                                nn.MaxPool2d(kernel_size=2, stride=2),
                                nn.ReLU())
        # n*128*28*28
        self.L2 = nn.Sequential(nn.Conv2d(128, 48, kernel_size=9, stride=2, padding=4),
                                nn.MaxPool2d(kernel_size=2, stride=2),
                                PrimaryCapsLayer(6, 8))
        # n*294*8  (6 capsule groups * 7*7 spatial positions = 294)
        self.L3 = nn.Sequential(CapsLayer(294, 8, 128, 16),
                                AgreementRouting(294, 128))
        # n*128*16
        self.L4 = nn.Sequential(CapsLayer(128, 16, 64, 32),
                                AgreementRouting(128, 64))
        # n*64*32 -> n*2048
        self.L5 = nn.Linear(64 * 32, 128)
        # n*128
        # Plain Python float; MyLoss keeps it detached from autograd.
        self.f_thres = 0.5

    def forward(self, x):
        """Return unit-norm embeddings of shape (n, 128)."""
        x = self.L1(x)
        x = self.L2(x)
        x = self.L3(x)
        x = self.L4(x)
        x = x.view(-1, 64 * 32)
        x = self.L5(x)
        x = F.normalize(x, p=2, dim=1)
        return x

    def MyLoss(self, x):
        """Triplet margin loss over embeddings.

        x: (3*k, d) embeddings laid out as k consecutive rows of
        (anchor, positive, negative). Returns the summed hinge loss
        relu(d(a,p)^2 - d(a,n)^2 + F_MARGIN); also updates ``f_thres``.
        """
        x = x.view(x.size(0) // 3, 3, -1)
        x = x.permute((1, 0, 2)).contiguous()
        dis_ap = (x[0] - x[1]).pow(2).sum(1)
        dis_an = (x[0] - x[2]).pow(2).sum(1)
        # FIX: fold the batch statistic in as a detached float. The original
        # stored a graph-attached tensor in self.f_thres, so each batch's
        # EMA chained onto the previous batch's autograd graph — retaining
        # every graph (memory growth) and leaking the threshold into later
        # backward passes. Values are unchanged.
        batch_mid = (dis_an.mean().item() + dis_ap.mean().item()) / 2.0
        self.f_thres = 0.9 * float(self.f_thres) + 0.1 * batch_mid
        loss = F.relu(dis_ap - dis_an + F_MARGIN).sum()
        return loss

    def MyCor(self, nn_outputs, nn_flags):
        """Per-pair correctness of the thresholded distance decision.

        nn_outputs: (2*k, d) embeddings laid out as k consecutive
        (first, second) pairs. nn_flags: (k,) expected decisions —
        presumably True means "same identity"; verify against caller.
        Returns a bool tensor, True where the distance test agrees.
        """
        nn_outdis = nn_outputs.view(nn_outputs.size(0) // 2, 2, -1)
        nn_outdis = nn_outdis.permute((1, 0, 2)).contiguous()
        nn_outdis = (nn_outdis[0] - nn_outdis[1]).pow(2).sum(1)
        nn_outnear = (nn_outdis < self.f_thres)
        return (nn_outnear == nn_flags)
