import torch.nn as nn
import torch
from model.biLstm import PhraseAttention, RNNEncoder
from model.vggnet import vgg_pool7
from opt import parse_opt

class NMN(nn.Module):
    """Scores each of `num` region-proposal crops against an attended
    language (phrase) embedding.

    Args:
        opt: dict of hyper-parameters (vocab size, RNN sizes, dropouts, ...).
        vis_dim: channel dim of the visual feature map per crop (512 for VGG).
        loc_dim: dim of the spatial/location feature per proposal (5).
        lang_dim: dim of the joint visual/language embedding space.
    """

    def __init__(self, opt, vis_dim, loc_dim, lang_dim):
        super(NMN, self).__init__()
        # CNN backbone; produces a (batch, 512, 7, 7) feature map per crop.
        self.visual_encoder = vgg_pool7()

        # Language branch: RNN encoder + phrase attention over its states.
        self.lang_attn = PhraseAttention(1024)
        self.lang_encoder = RNNEncoder(
            vocab_size=opt['voca_size'],
            word_embedding_size=opt['word_embedding_size'],
            word_vec_size=opt['word_vec_size'],
            hidden_size=opt['rnn_hidden_size'],
            bidirectional=opt['bidirectional'] > 0,
            input_dropout_p=opt['word_drop_out'],
            dropout_p=opt['rnn_drop_out'],
            n_layers=opt['rnn_num_layers'],
            rnn_type=opt['rnn_type'],
            variable_lengths=0,
        )
        # Projects the flattened (visual + spatial) proposal feature,
        # (vis_dim + loc_dim) * 7*7 values, into the language embedding space.
        self.vis_embed = nn.Sequential(
            nn.Linear((vis_dim + loc_dim) * 49, lang_dim),
            nn.BatchNorm1d(lang_dim),
            nn.ReLU(),
            nn.Dropout(opt['rnn_drop_out']),
            nn.Linear(lang_dim, lang_dim),
            nn.BatchNorm1d(lang_dim),
        )
        # Maps the fused (visual * language) embedding to a scalar score.
        self.fc = nn.Linear(lang_dim, 1)

    def forward(self, imcrop, spatial, text_seq):
        """Score every proposal crop against the attended phrase embedding.

        Args:
            imcrop: (batch, num, 3, 224, 224) image crops, one per proposal.
            spatial: (batch, num, 5) spatial/location feature per proposal.
            text_seq: (batch, seq_len, 1) word-id tensor; squeezed to 2-D below.

        Returns:
            score: (batch, num) matching score per proposal; argmax over the
                `num` axis picks the best proposal.
            attn: per-word attention weights from PhraseAttention.
        """
        batch, num, channel, h, w = imcrop.shape
        # Broadcast each 5-d spatial vector across the 7x7 feature grid.
        sp_ = spatial.unsqueeze(dim=3).expand(batch, num, 5, 7 * 7).double()  # (batch, num, 5, 49)

        # Visual branch: encode each proposal crop separately (bounds peak
        # memory vs. one batch*num forward pass), then stack along dim 1.
        # NOTE: crops are fed in (batch, C, H, W) layout; an earlier version
        # mistakenly reshaped to (batch, H, W, C).
        crop_feats = [self.visual_encoder(imcrop[:, n, ...]) for n in range(num)]  # each (batch, 512, 7, 7)
        v_fes = torch.stack(crop_feats, dim=1)  # (batch, num, 512, 7, 7)
        v_fes = v_fes.reshape(batch, num, 512, -1).double()  # (batch, num, 512, 49)
        v_loc = torch.cat([sp_, v_fes], dim=2)  # (batch, num, 517, 49)

        # Language branch.
        text_seq = text_seq.squeeze(2).long()
        output, hidden, embed = self.lang_encoder(text_seq)
        # output (batch, T, 1024), embed (batch, T, 512)
        attn, weighted_emb = self.lang_attn(output, embed, text_seq)

        # L2-normalize both modalities before fusion (F.normalize default dim=1).
        v_loc = nn.functional.normalize(v_loc)
        weighted_emb = nn.functional.normalize(weighted_emb)

        # Flatten each proposal's feature and embed it into lang_dim space.
        v_loc = v_loc.view(batch * num, -1).float()  # (batch*num, 517*49)
        v_loc = self.vis_embed(v_loc)  # (batch*num, lang_dim)
        v_loc = v_loc.view(batch, num, -1)

        # Element-wise fusion with the phrase embedding, then a scalar score.
        mul = v_loc * weighted_emb.unsqueeze(1)
        score = self.fc(mul).squeeze(2)
        return score, attn  # argmax(score, dim=1) gives the chosen proposal

if __name__ == '__main__':
    # Smoke test: 2 images x 25 proposals, 20-token queries.
    # (torch is already imported at module level; no re-import needed.)
    imcrop = torch.rand((2, 25, 3, 224, 224))
    spi = torch.rand((2, 25, 5))
    # rand() in [0, 1) floored by .long() -> all token ids 0; fine for a shape check.
    text_seq = torch.rand((2, 20, 1)).long()

    model = NMN(vars(parse_opt()), 512, 5, 512)
    score, attn = model(imcrop, spi, text_seq)
    # Index of the best-scoring proposal per image.
    print(score.argmax(1))