#coding=utf-8
import torch.nn as nn
from language_model import WordEmbedding, QuestionEmbedding
from utils import CAT, create_objectPairs
from FCOutputModel import FCOutputModel
from Classifier import Classifier
class RN(nn.Module):
    """Relation Network head for visual question answering.

    Pipeline: embed the question, project image features and the question
    embedding into a common space, form all pairs of visual objects, append
    the question to each pair, score every pair with the shared MLP ``g``,
    sum over pairs, and classify the sum with ``f``.
    """

    def __init__(self, w_emb, q_emb, v_proj, q_proj, g_fc, f_fc):
        """
        Args:
            w_emb:  word-embedding module (token ids -> word vectors).
            q_emb:  question encoder (word vectors -> sentence embedding).
            v_proj: projection applied to per-object image features.
            q_proj: projection applied to the question embedding.
            g_fc:   relation MLP applied to every (object_i, object_j, q) triple.
            f_fc:   final classifier over the summed relation features.
        """
        super(RN, self).__init__()
        self.w_emb = w_emb
        self.q_emb = q_emb
        self.v_proj = v_proj
        self.q_proj = q_proj
        self.g_fc = g_fc
        self.f_fc = f_fc

    def forward(self, img, qst, labels):
        """Return answer logits for a batch of (image-features, question) pairs.

        Args:
            img:    per-object visual features; presumably (batch, num_objects,
                    feature_dim) — TODO confirm against the data loader.
            qst:    question token ids fed to the word embedding.
            labels: unused here; kept so the caller's signature still matches
                    (loss is presumably computed outside this module).

        Returns:
            logits over the answer candidates, shape (batch, num_answers).
        """
        w_emb = self.w_emb(qst)
        q_emb = self.q_emb(w_emb)

        v_proj = self.v_proj(img)
        q_proj = self.q_proj(q_emb)

        # All ordered pairs of projected objects: (batch, k, 2 * proj_dim)
        # where k = num_objects ** 2.
        object_pairs = create_objectPairs(v_proj, v_proj)
        k = object_pairs.size(1)
        # Broadcast the question embedding to every object pair.
        q_proj = q_proj.unsqueeze(1).repeat(1, k, 1)
        joint_repr = CAT(object_pairs, q_proj)

        """g"""
        g_fc = self.g_fc(joint_repr)
        # Sum relation scores over the k pairs. NOTE: sum(dim=1) already
        # removes the pair dimension; the previous trailing `.squeeze()` was a
        # no-op for batch > 1 but collapsed the batch dimension when the batch
        # size was 1, which broke the downstream classifier for single-sample
        # batches.
        g_fc = g_fc.sum(dim=1)

        """f"""
        logits = self.f_fc(g_fc)
        return logits



def create_rn(dataset, num_hid):
    """Assemble a Relation Network sized for *dataset*.

    Args:
        dataset: provides ``dictionary.ntoken`` (vocabulary size) and
                 ``num_ans_candidates`` (number of answer classes).
        num_hid: hidden size of the question encoder and projections.

    Returns:
        A ready-to-train ``RN`` module.
    """
    word_embedding = WordEmbedding(dataset.dictionary.ntoken, 300, 0.0)
    question_encoder = QuestionEmbedding(300, num_hid, 1, False, 0.0, rnn_type='LSTM')
    visual_projection = FCOutputModel([num_hid*2, num_hid])
    question_projection = FCOutputModel([num_hid, num_hid])
    relation_mlp = FCOutputModel([3072, 1024, 1024])  # 3072, 1024, 1024
    answer_head = Classifier([1024, dataset.num_ans_candidates])  # 2048, 1852
    return RN(
        word_embedding,
        question_encoder,
        visual_projection,
        question_projection,
        relation_mlp,
        answer_head,
    )

# if __name__ == '__main__':
#     w_emb = WordEmbedding(2936, 300, 0.0)
#     q_emb = QuestionEmbedding(300, 1024, 1, False, 0.0, rnn_type='LSTM')
#     g_fc = FCOutputModel([5124, 2048, 2048, 2048, 2048]) #5124, 2048, 2048, 2048, 2048
#     f_fc = Classifier([2048, 2048, 2048, 1852]) #2048, 2048, 2048, 1852
#     rn = RN(w_emb, q_emb, g_fc, f_fc)
#     print(rn)
#     pass