import math
import torch
import mindspore as ms
import numpy as np
from mindspore import  nn,ops
from  mindspore import context
from ms_resnet import resnet45
from ms_transformer import PositionalEncoding

_default_tfmer_cfg = dict(d_model=512, nhead=8, d_inner=2048, # 1024
                          dropout=0.1, activation='relu')

class ResTranformer(nn.Cell):
    """Vision backbone: ResNet-45 feature extractor followed by a
    transformer encoder over the flattened spatial positions.

    Input:  images, a 4-D image batch accepted by resnet45.
    Output: features of shape (n, d_model, h, w), where (h, w) is the
            ResNet output resolution (the fixed sizes below assume 8 x 32,
            i.e. a sequence length of 256 and batch size 4).
    """

    def __init__(self, config):
        super().__init__()
        self.resnet = resnet45()

        self.d_model = _default_tfmer_cfg['d_model']
        nhead = _default_tfmer_cfg['nhead']
        d_inner = _default_tfmer_cfg['d_inner']
        dropout = _default_tfmer_cfg['dropout']
        activation = _default_tfmer_cfg['activation']
        # NOTE(review): hard-coded; the commented-out original intent was
        # ifnone(config.model_vision_backbone_ln, 2) -- confirm 3 is desired.
        num_layers = 3
        # All-ones attention mask => no positions are masked out.  Shape is
        # fixed to (batch=4, seq=256, seq=256), matching the encoder config.
        self.encoder_mask = ms.Tensor(np.ones((4, 256, 256)), ms.float16)
        self.pos_encoder = PositionalEncoding(self.d_model, max_len=8 * 32)

        self.transformer = nn.TransformerEncoder(
            batch_size=4,
            num_layers=num_layers,
            hidden_size=self.d_model,
            num_heads=nhead,
            ffn_hidden_size=d_inner,
            hidden_dropout_rate=dropout,
            attention_dropout_rate=dropout,
            hidden_act=activation,
            seq_length=256)

    def construct(self, images):
        """Extract CNN features and contextualize them with self-attention.

        Returns a tensor of shape (n, c, h, w) with c == d_model.
        """
        feature = self.resnet(images)         # (n, c, h, w)
        n, c, h, w = feature.shape
        feature = feature.view(n, c, -1)      # (n, c, h*w)
        # The positional encoder expects (seq, batch, embed).
        feature = feature.transpose(2, 0, 1)  # (h*w, n, c)
        feature = self.pos_encoder(feature)
        # MindSpore's TransformerEncoder is batch-first: (batch, seq, embed).
        feature = feature.transpose(1, 0, 2)  # (n, h*w, c)
        feature, _ = self.transformer(feature, self.encoder_mask)
        # BUG FIX: the original used transpose(1, 2, 0), which yields
        # (h*w, c, n); viewing that as (n, c, h, w) silently scrambles data
        # across the batch (element counts match, so no error is raised).
        # The correct inverse of the batch-first layout is (0, 2, 1).
        feature = feature.transpose(0, 2, 1)  # (n, c, h*w)
        feature = feature.view(n, c, h, w)
        return feature

        