import math
import torch
import torch.nn as nn
import torchvision
import torch.nn.functional as F
from core.model.attention import *



class ImageEncoder(nn.Module):
    """Extract N spatial embeddings from an image with a pretrained
    ResNet-152 backbone (avgpool and fc stripped) followed by an
    adaptive average pool.

    Args:
        num_image_embeds: number of output embeddings N; must be 1..9.

    Raises:
        ValueError: if ``num_image_embeds`` is unsupported. (Previously an
            unsupported value silently left ``self.pool`` undefined and the
            module crashed only at forward time with an AttributeError.)
    """

    def __init__(self, num_image_embeds):
        super(ImageEncoder, self).__init__()
        model = torchvision.models.resnet152(pretrained=True)
        # Drop the final avgpool + fc; keep the conv feature extractor,
        # which maps Bx3x224x224 -> Bx2048x7x7.
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)

        # pool_func = (
        #     nn.AdaptiveAvgPool2d
        #     if img_embed_pool_type == "avg"
        #     else nn.AdaptiveMaxPool2d
        # )
        pool_func = nn.AdaptiveAvgPool2d

        # (H, W) pooling grid for each supported embedding count; H*W == N.
        # Reproduces the original if/elif ladder exactly.
        grids = {
            1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1),
            6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3),
        }
        if num_image_embeds not in grids:
            raise ValueError(
                "num_image_embeds must be one of %s, got %r"
                % (sorted(grids), num_image_embeds)
            )
        self.pool = pool_func(grids[num_image_embeds])

    def forward(self, x):
        """Bx3x224x224 -> Bx2048x7x7 -> pooled -> BxNx2048."""
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)   # Bx2048xN
        out = out.transpose(1, 2).contiguous()  # BxNx2048
        return out  # BxNx2048





class Model_Single_Text(nn.Module):
    """Text-only classifier: frozen GloVe embeddings -> linear adapter ->
    stack of ``Block`` encoders -> ``AttFlat`` pooling -> linear head.
    """

    def __init__(self, args, vocab_size, pretrained_emb):
        super(Model_Single_Text, self).__init__()

        # Word embedding table initialised from pretrained GloVe vectors
        # (pretrained_emb is a numpy array of shape (vocab_size, 300)).
        self.embedding = nn.Embedding(
            num_embeddings=vocab_size,
            embedding_dim=300
        )
        self.embedding.weight.data.copy_(torch.from_numpy(pretrained_emb))
        # BUG FIX: the original did `self.embedding.requires_grad_ = False`,
        # which merely shadows the `requires_grad_` method with a bool and
        # does NOT freeze anything. Freeze the weight parameter explicitly.
        self.embedding.weight.requires_grad = False

        # Project word embeddings into the model hidden size.
        self.adapter_text = nn.Linear(args.word_embed_size, args.hidden_size)
        # Feature size to hid size
        # self.adapter = nn.Linear(args.audio_feat_size, args.hidden_size)

        # Encoder stack.
        self.enc_list = nn.ModuleList([Block(args, i) for i in range(args.layer)])

        # Flattening features before projection.
        self.attflat_lang = AttFlat(args, 1, merge=True)

        # Classification layers.
        self.proj_norm = LayerNorm(args.hidden_size * 2)
        self.proj = nn.Linear(2 * args.hidden_size, args.ans_size)

    def forward(self, x, if_add_noise):
        """x: LongTensor (b, seq_len) of token ids.
        if_add_noise: when truthy, perturb the embeddings via ``add_noise``.
        Returns logits of shape (b, ans_size).
        """
        x_mask = make_mask(x.unsqueeze(2))  # b,1,1,len

        embedding = self.embedding(x)  # b, seq_len, 300

        if if_add_noise:
            # NOTE(review): `add_noise` is not defined in this class or in
            # the visible file — presumably attached by a subclass or mixin;
            # confirm before calling with if_add_noise=True.
            embedding = self.add_noise(embedding, x_mask)

        x = self.adapter_text(embedding)  # b, seq_len, hidden_size

        # Only the first block receives the padding mask (matches the
        # original behaviour).
        for i, dec in enumerate(self.enc_list):
            x_m = x_mask if i == 0 else None
            x = dec(x, x_m)  # b, len, hidden

        x = self.attflat_lang(x, None)  # b, hidden*2

        # Classification layers.
        proj_feat = self.proj_norm(x)
        ans = self.proj(proj_feat)  # b, ans_size
        return ans



class Model_Single_Vision(nn.Module):
    """Vision-only classifier: 2048-d region features -> linear adapter ->
    stack of ``Block`` encoders -> ``AttFlat`` pooling -> linear head.
    """

    def __init__(self, args):
        super(Model_Single_Vision, self).__init__()

        # Project ResNet region features (2048-d) into the hidden size.
        self.adapter_img = nn.Linear(2048, args.hidden_size)

        # Encoder stack.
        self.enc_list = nn.ModuleList([Block(args, i) for i in range(args.layer)])

        # Flattening features before projection.
        self.attflat_lang = AttFlat(args, 1, merge=True)

        # Classification layers.
        self.proj_norm = LayerNorm(args.hidden_size * 2)
        self.proj = nn.Linear(2 * args.hidden_size, args.ans_size)

    def forward(self, x, if_add_noise):
        """x: FloatTensor (b, 49, 2048) of image region features.
        ``if_add_noise`` is accepted for interface parity but unused here.
        Returns logits of shape (b, ans_size).
        """
        x_mask = make_mask(x)  # b,1,1,len

        hidden = self.adapter_img(x)  # b, seq_len, hidden_size

        # Only the first block receives the mask.
        for idx, block in enumerate(self.enc_list):
            hidden = block(hidden, x_mask if idx == 0 else None)  # b,len,hidden

        pooled = self.attflat_lang(hidden, None)  # b, hidden*2

        # Classification layers.
        normed = self.proj_norm(pooled)
        return self.proj(normed)  # b, ans_size


class ImageEncoder_New(nn.Module):
    """Global image encoder: pretrained ResNet-50 with the final fc layer
    removed, producing one 2048-d vector per image.
    """

    def __init__(self):
        super(ImageEncoder_New, self).__init__()
        backbone = torchvision.models.resnet50(pretrained=True)
        # Keep everything up to and including the global avgpool; drop only
        # the classification fc. Output of self.model: b,2048,1,1.
        layers = list(backbone.children())[:-1]
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        # b,3,H,W -> b,2048,1,1
        feat = self.model(x)
        # Drop the two trailing singleton spatial dims -> b,2048.
        return feat.squeeze(2).squeeze(2)

class ImageClf(nn.Module):
    """3-way image classifier on top of the global ResNet-50 encoder."""

    def __init__(self, args):
        super(ImageClf, self).__init__()
        self.args = args
        self.img_encoder = ImageEncoder_New()
        # Single linear head: 2048-d global feature -> 3 class logits.
        self.linear = nn.Linear(2048, 3)

    def forward(self, x):
        features = self.img_encoder(x)  # b, 2048
        return self.linear(features)    # b, 3


if __name__ == "__main__":
    # Smoke test: run one random batch through the classifier.
    # FIX: use 224x224, the standard ResNet input size that this file's own
    # shape comments document (Bx3x224x224). The original passed 244x244,
    # presumably a typo that only worked because of adaptive pooling.
    net = ImageClf(None)
    net(torch.randn((64, 3, 224, 224)))