#import torch
#import torch.nn as nn
import mindspore as ms
from mindspore import nn
from src.ms_transformer import PositionalEncoding



def encoder_layer(in_c, out_c, k=3, s=2, p=1):
    """Build a Conv -> BatchNorm -> ReLU downsampling block.

    Args:
        in_c: number of input channels.
        out_c: number of output channels.
        k: convolution kernel size.
        s: convolution stride (int or per-axis tuple).
        p: explicit padding width used with pad_mode="pad".

    Returns:
        nn.SequentialCell wrapping the three layers.
    """
    conv = nn.Conv2d(in_c, out_c, k, s, pad_mode="pad", padding=p, has_bias=True)
    norm = nn.BatchNorm2d(out_c, momentum=0.1)
    act = nn.ReLU()
    return nn.SequentialCell(conv, norm, act)

class ms_upsample_scale(nn.Cell):
    """Nearest-neighbour upsampling by a fixed scale factor.

    The target size depends on the runtime input shape, so the resize
    operator has to be created inside ``construct`` rather than in
    ``__init__``.
    """

    def __init__(self, scale_factor, align_corners):
        super().__init__()
        # Multiplier applied to both spatial dimensions.
        self.scale_factor = scale_factor
        self.align_corners = align_corners

    def construct(self, x):
        # x is assumed NCHW; only the spatial dims are needed here.
        in_h = x.shape[2]
        in_w = x.shape[3]
        target = (self.scale_factor * in_h, self.scale_factor * in_w)
        resize = ms.ops.ResizeNearestNeighbor(size=target, align_corners=self.align_corners)
        return resize(x)

class ms_upsample_size(nn.Cell):
    """Nearest-neighbour upsampling to a fixed output size.

    Args:
        size: target (height, width) of the output feature map.
        align_corners: forwarded to ``ResizeNearestNeighbor``.
    """

    def __init__(self, size, align_corners):
        super().__init__()
        self.size = size
        self.align_corners = align_corners
        # The target size is fixed at construction time, so build the
        # operator once here instead of re-creating it on every forward
        # pass (the original built it inside construct each call).
        self.resize = ms.ops.ResizeNearestNeighbor(size=size,
                                                   align_corners=align_corners)

    def construct(self, x):
        return self.resize(x)


# NOTE: MindSpore's ResizeBilinear upsampling only supports bilinear mode,
# so nearest-neighbour upsampling is implemented via ResizeNearestNeighbor above.
def decoder_layer1(in_c, out_c, k=3, s=1, p=1, mode='nearest', scale_factor=None, size=None):
    """Build an upsample-by-scale -> Conv -> BatchNorm -> ReLU block.

    Args:
        in_c: number of input channels.
        out_c: number of output channels.
        k, s, p: kernel size, stride, padding of the convolution.
        mode: only 'nearest' disables align_corners; any other value enables it.
        scale_factor: spatial multiplier for the upsampling stage.
        size: unused here (kept for signature parity with decoder_layer2).

    Returns:
        nn.SequentialCell of the four stages.
    """
    align_corners = mode != 'nearest'
    stages = [
        ms_upsample_scale(scale_factor, align_corners=align_corners),
        nn.Conv2d(in_c, out_c, k, s, pad_mode="pad", padding=p, has_bias=True),
        nn.BatchNorm2d(out_c, momentum=0.1),
        nn.ReLU(),
    ]
    return nn.SequentialCell(stages)

def decoder_layer2(in_c, out_c, k=3, s=1, p=1, mode='nearest', scale_factor=None, size=None):
    """Build an upsample-to-size -> Conv -> BatchNorm -> ReLU block.

    Args:
        in_c: number of input channels.
        out_c: number of output channels.
        k, s, p: kernel size, stride, padding of the convolution.
        mode: only 'nearest' disables align_corners; any other value enables it.
        scale_factor: unused here (kept for signature parity with decoder_layer1).
        size: target (height, width) for the upsampling stage.

    Returns:
        nn.SequentialCell of the four stages.
    """
    align_corners = mode != 'nearest'
    stages = [
        ms_upsample_size(size, align_corners=align_corners),
        nn.Conv2d(in_c, out_c, k, s, pad_mode="pad", padding=p, has_bias=True),
        nn.BatchNorm2d(out_c, momentum=0.1),
        nn.ReLU(),
    ]
    return nn.SequentialCell(stages)

class PositionAttention(nn.Cell):
    """Positional attention over a visual feature map.

    A small U-Net-shaped encoder/decoder refines the input features into
    attention keys, while learned positional encodings (projected through a
    dense layer) act as queries — one query per output character position.
    Scaled dot-product attention then pools one feature vector per position.

    Args:
        max_length: number of query positions T (max decoded sequence length).
        in_channels: channel count E of the input feature map.
        num_channels: internal channel count of the key encoder/decoder.
        h, w: spatial size the decoder restores; must match the input's (H, W).
        mode: upsampling mode for the decoder ('nearest' -> align_corners=False).
        **kwargs: ignored; accepted for config-driven construction.
    """

    def __init__(self, max_length, in_channels=512, num_channels=64,
                 h=8, w=32, mode='nearest', **kwargs):
        super().__init__()
        self.max_length = max_length

        # Contracting path: four strided conv blocks.  Kept as individual
        # attributes (not a CellList) to preserve parameter names for
        # checkpoint compatibility.
        self.k_encoder1 = encoder_layer(in_channels, num_channels, s=(1, 2))
        self.k_encoder2 = encoder_layer(num_channels, num_channels, s=(2, 2))
        self.k_encoder3 = encoder_layer(num_channels, num_channels, s=(2, 2))
        self.k_encoder4 = encoder_layer(num_channels, num_channels, s=(2, 2))

        # Expanding path: three x2 upsampling blocks plus a final resize
        # back to the (h, w) input resolution.
        self.k_decoder1 = decoder_layer1(num_channels, num_channels, scale_factor=2, mode=mode)
        self.k_decoder2 = decoder_layer1(num_channels, num_channels, scale_factor=2, mode=mode)
        self.k_decoder3 = decoder_layer1(num_channels, num_channels, scale_factor=2, mode=mode)
        self.k_decoder4 = decoder_layer2(num_channels, in_channels, size=(h, w), mode=mode)

        # NOTE(review): dropout=1.0 — confirm PositionalEncoding interprets
        # this as keep_prob (MindSpore convention), not a 100% drop rate.
        self.pos_encoder = PositionalEncoding(in_channels, dropout=1.0, max_len=max_length)
        self.project = nn.Dense(in_channels, in_channels,
                                weight_init='uniform', bias_init='uniform')

        # Stateless operators: build once here instead of on every
        # construct() call (the original re-created them each forward).
        self.zeros = ms.ops.Zeros()
        self.batmatmul = ms.ops.BatchMatMul()
        self.softmax = nn.Softmax()

    def construct(self, x):
        """Attend over spatial positions for each character slot.

        Args:
            x: feature map of shape (N, E, H, W).

        Returns:
            attn_vecs: (N, T, E) attended feature per query position.
            attn_scores: (N, T, H, W) attention maps over the input grid.
        """
        N, E, H, W = x.shape
        k, v = x, x  # keys and values both start from the input features

        # Encoder path, retaining each stage's output as a skip feature.
        features = []
        k = self.k_encoder1(k)
        features.append(k)
        k = self.k_encoder2(k)
        features.append(k)
        k = self.k_encoder3(k)
        features.append(k)
        k = self.k_encoder4(k)
        features.append(k)

        # Decoder path with additive skip connections (deepest skip first).
        k = self.k_decoder1(k)
        k = k + features[2]
        k = self.k_decoder2(k)
        k = k + features[1]
        k = self.k_decoder3(k)
        k = k + features[0]
        k = self.k_decoder4(k)

        k_1, k_2, k_3, k_4 = k.shape

        # Queries: positional encodings of an all-zero sequence, then a
        # learned projection.  TODO q = f(q, k): queries currently ignore k.
        x_zeros = self.zeros((self.max_length, N, E), ms.float32)  # (T, N, E)
        q = self.pos_encoder(x_zeros)   # (T, N, E)
        q = q.transpose(1, 0, 2)        # (N, T, E)
        q = self.project(q)

        # Scaled dot-product attention over the H*W spatial positions.
        k_attn = k.view(k_1, k_2, -1)               # (N, E, H*W)
        attn_scores = self.batmatmul(q, k_attn)     # (N, T, H*W)
        attn_scores = attn_scores / (E ** 0.5)
        attn_scores = self.softmax(attn_scores)

        v = v.transpose(0, 2, 3, 1)                 # (N, H, W, E)
        v = v.view(N, -1, E)                        # (N, H*W, E)
        attn_vecs = self.batmatmul(attn_scores, v)  # (N, T, E)

        return attn_vecs, attn_scores.view(N, -1, H, W)
