import math 
import torch 
import torch.nn as nn 

class Affinity(nn.Module):
    """Holds a 3x3, stride-1, same-padding convolution that projects
    hidden features into key space (spatial size is preserved)."""

    def __init__(self, hidden_size):
        super().__init__()
        # Same channel count in and out; padding=1 keeps H and W unchanged.
        self.key_layer = nn.Conv2d(
            in_channels=hidden_size,
            out_channels=hidden_size,
            kernel_size=3,
            stride=1,
            padding=1,
        )

class MemoryReader(nn.Module):
    """Space-time memory reader: matches query keys against memory keys
    and uses the resulting attention weights to aggregate memory values."""

    def __init__(self):
        super().__init__()

    def get_affinity(self, mk, qk):
        """Compute attention weights from memory keys to query keys.

        Args:
            mk: memory keys, shape (B, CK, T, H, W).
            qk: query keys; flattened past dim 2 to (B, CK, HW).

        Returns:
            Affinity of shape (B, T*H*W, H*W); each column sums to 1.
        """
        B, CK, T, H, W = mk.shape
        mem_keys = mk.flatten(start_dim=2)        # (B, CK, THW)
        query_keys = qk.flatten(start_dim=2)      # (B, CK, HW)

        # Negative squared distance up to a query-only term (constant along
        # the softmax axis, so it cancels — see supplementary material):
        # -||m - q||^2 = 2 m.q - ||m||^2 - ||q||^2.
        mem_sq = mem_keys.pow(2).sum(1).unsqueeze(2)      # (B, THW, 1)
        dot = mem_keys.transpose(1, 2) @ query_keys       # (B, THW, HW)
        logits = (2 * dot - mem_sq) / math.sqrt(CK)       # B, THW, HW

        # Explicit numerically-stable softmax over the memory axis,
        # kept manual to stay aligned with the evaluation-time code.
        col_max = torch.max(logits, dim=1, keepdim=True)[0]
        exps = torch.exp(logits - col_max)
        norm = torch.sum(exps, dim=1, keepdim=True)
        return exps / norm

    def readout(self, affinity, mv, qv):
        """Weighted-sum memory values by affinity, then append query values.

        Args:
            affinity: (B, T*H*W, H*W) weights from get_affinity.
            mv: memory values, shape (B, CV, T, H, W).
            qv: query values, shape (B, CQ, H, W).

        Returns:
            Tensor of shape (B, CV + CQ, H, W).
        """
        B, CV, T, H, W = mv.shape

        flat_values = mv.view(B, CV, T * H * W)
        # Weighted sum over the THW memory positions -> (B, CV, HW).
        aggregated = torch.bmm(flat_values, affinity)
        aggregated = aggregated.view(B, CV, H, W)

        return torch.cat([aggregated, qv], dim=1)

