import torch
from torch import nn
import torch.nn.functional as F

# Input image size in pixels.
width = 300
height = 150
# Side length of each square patch; assumed to divide width and height evenly
# (300/25 = 12, 150/25 = 6) — TODO confirm inputs are exactly this size.
patch_l = 25
# Number of patches per image: (width/patch_l) * (height/patch_l) = 72.
patch_n = int(width * height / patch_l / patch_l)
# Transformer model/embedding dimension.
n_dim = 256
# Fixed decoder text sequence length (tokens).
text_l = 32
# Number of distinct token ids in the text vocabulary.
vocab_size = 16


class Model(nn.Module):
    """Image-to-text encoder-decoder transformer.

    The image is patchified by a non-overlapping strided convolution and fed
    to the transformer encoder; the text tokens are embedded and decoded with
    a causal mask; the output is projected to vocabulary logits.
    """

    def __init__(self):
        super().__init__()
        # Non-overlapping patch embedding: kernel size == stride == patch_l.
        self.img_prj = nn.Conv2d(3, n_dim, (patch_l, patch_l), patch_l)
        self.image_pos_emb = nn.Embedding(patch_n, n_dim)
        self.transformer = nn.Transformer(d_model=n_dim, dim_feedforward=n_dim * 4, batch_first=True)

        self.text_pos_emb = nn.Embedding(text_l, n_dim)
        self.text_emb = nn.Embedding(vocab_size, n_dim)

        self.vocab_prj = nn.Linear(n_dim, vocab_size)
        # Causal decoder mask.  PyTorch bool-mask convention: True marks
        # positions that may NOT be attended, so the disallowed region is
        # everything strictly above the diagonal (the future).
        # Bug fixes vs. the previous version:
        #   (a) register_buffer returns None, so assigning its result back to
        #       self.mask wiped the buffer and the decoder ran unmasked;
        #   (b) tril marked the past as disallowed — inverted causality.
        self.register_buffer('mask', torch.triu(torch.ones(text_l, text_l, dtype=torch.bool), diagonal=1))

    # image input size should be (C, H, W) (Conv2d also accepts batched (B, C, H, W))
    def forward(self, image, text):
        """Return vocabulary logits with trailing shape (text_l, vocab_size).

        Args:
            image: float tensor of pixels, channels-first.
            text: long tensor of token ids with trailing length text_l.
        """
        image = self.img_prj(image)        # (..., n_dim, height/patch_l, width/patch_l)
        image = image.flatten(-2, -1)      # (..., n_dim, patch_n)
        # Create index tensors on the input's device so the module also works
        # on GPU (the default device would pin them to CPU).
        image_pos = torch.arange(patch_n, dtype=torch.long, device=image.device)
        image = torch.transpose(image, -1, -2)  # (..., patch_n, n_dim)
        image = image + self.image_pos_emb(image_pos)

        text_emb = self.text_emb(text)
        text_pos = torch.arange(text_l, dtype=torch.long, device=text.device)
        text_emb = text_emb + self.text_pos_emb(text_pos)

        y = self.transformer(image, text_emb, tgt_mask=self.mask)
        return self.vocab_prj(y)

