from torch import nn, einsum
from einops import rearrange

class CrossTransformer(nn.Module):
    """Spatially-aware attention head for few-shot classification
    (CrossTransformer-style).

    Every spatial location of the query feature map attends over every
    spatial location of each class's support feature maps; the attended
    support values form a query-aligned prototype per class, and the
    class score is the negative (spatially normalised) squared distance
    between the query values and that prototype.

    Args:
        img_shape: feature-map shape indexed as (_, channels, height,
            width); index 0 is not used by this module.
        args: namespace that provides ``test_way`` (number of classes k).
        dim_key: channel dimension of the shared query/key projection.
        dim_value: channel dimension of the value projection.
    """

    def __init__(
        self,
        img_shape = None,
        args = None,
        dim_key = 128,
        dim_value = 128
    ):
        super().__init__()

        # 1/sqrt(d_k) attention temperature.
        self.scale = dim_key ** -0.5
        # 1x1 convolutions: shared key projection (used for both query
        # and support) and value projection.
        self.to_qk = nn.Conv2d(img_shape[1], dim_key, 1, bias = False)
        self.to_v = nn.Conv2d(img_shape[1], dim_value, 1, bias = False)

        self.img_shape = img_shape
        self.args = args
        # Support-set features, supplied later via fit_().
        self.support = None

    def fit_(self, support, support_labels):
        """Store the support-set features for use in forward().

        ``support_labels`` is accepted for API compatibility but unused:
        the class grouping is recovered from the tensor layout, which is
        assumed to be ordered class-by-class (k blocks of n shots).

        NOTE: the original version did ``del support`` here "to free
        memory" — that only deleted the local name while the attribute
        kept the reference alive, so it was a no-op and is removed.
        """
        self.support = support

    def forward(self, query):
        """Score one query feature map against the stored support set.

        Dimension names: b=batch (fixed to 1), k=classes, n=shots per
        class, c=channels, h/i=height, w/j=width.

        Args:
            query: tensor reshapeable to (1, c, h, w) with (c, h, w)
                taken from ``img_shape``.

        Returns:
            1-D tensor of k negative squared distances divided by
            (h * w); higher means more similar.

        Bug fix vs. the original: ``self.support`` is no longer
        overwritten with intermediate tensors, so forward() can be
        called repeatedly after a single fit_().
        """
        b, k = 1, self.args.test_way
        c, h, w = self.img_shape[1], self.img_shape[2], self.img_shape[3]

        # Flatten any leading grouping dims down to a plain conv batch.
        support = self.support.view(-1, c, h, w)
        query = query.view(-1, c, h, w)

        # Project into key and value spaces (query uses the same key
        # projection as the support, per the CrossTransformer design).
        query_q, query_v = self.to_qk(query), self.to_v(query)
        support_k, support_v = self.to_qk(support), self.to_v(support)

        # Regroup supports to (b, k, n, c, h, w) so classes and shots
        # are explicit axes; n is inferred from the total count.
        support_k = support_k.view(b, k, -1, *support_k.shape[1:])
        support_v = support_v.view(b, k, -1, *support_v.shape[1:])

        # Scaled dot-product attention between every query location
        # (h, w) and every support location (n, i, j) of each class.
        attn = einsum('b c h w, b k n c i j -> b k h w n i j', query_q, support_k) * self.scale
        # Softmax jointly over all support locations of a class.
        attn = attn.reshape(b, k, h, w, -1).softmax(dim = -1)
        attn = attn.view(b, k, h, w, -1, h, w)

        # Attention-weighted support values: a query-aligned prototype
        # per class, shape (b, k, c, h, w).
        proto = einsum('b k h w n i j, b k n c i j -> b k c h w', attn, support_v)

        # Flatten to vectors and compare by squared Euclidean distance,
        # normalised by the number of spatial positions.
        proto = proto.reshape(b, k, -1)
        query_v = query_v.reshape(b, -1).unsqueeze(1)

        # Drop the singleton batch dimension -> shape (k,).
        return -(((query_v - proto) ** 2).sum(dim = -1) / (h * w)).squeeze(0)
