class Aligner(object):
    """Coordinate a global encoder and a decoder to compute an alignment loss.

    The encoder maps eight id/mask tensors (positive source/target pairs plus
    their negative counterparts) to four pooled representations; the decoder
    maps those representations to a scalar loss. This wrapper is not an
    ``nn.Module`` itself: it only forwards train/eval mode switches to the two
    wrapped modules and caches the most recent loss on ``self.loss``.
    """

    def __init__(self, global_encoder, decoder, config) -> None:
        """
        Args:
            global_encoder: module mapping the eight id/mask tensors to four
                pooled representations ``(s, t, neg_s, neg_t)``.
            decoder: module mapping the four pooled representations to a loss.
            config: experiment configuration (stored for reference; not read
                by this class).
        """
        self.config = config
        self.global_encoder = global_encoder
        self.decoder = decoder

        # Infer device placement from the first trainable encoder parameter.
        # NOTE(review): raises StopIteration if the encoder has no trainable
        # parameters — presumably impossible in practice; confirm.
        self.use_cuda = next(
            filter(lambda p: p.requires_grad, global_encoder.parameters())
        ).is_cuda

    def train(self):
        """Put both sub-modules into training mode."""
        self.global_encoder.train()
        self.decoder.train()
        self.training = True

    def eval(self):
        """Put both sub-modules into evaluation mode."""
        self.global_encoder.eval()
        self.decoder.eval()
        self.training = False

    def forward(self,
                s_input_ids, s_att_mask, t_input_ids, t_att_mask,
                neg_s_input_ids, neg_s_att_mask,
                neg_t_input_ids, neg_t_att_mask
            ):
        """Encode positive/negative pairs and compute the alignment loss.

        The loss is cached on ``self.loss`` (also retrievable through
        ``compute_loss``) and returned for convenience.
        """
        if self.use_cuda:
            # Move every input tensor onto the GPU the encoder lives on.
            s_input_ids = s_input_ids.cuda()
            s_att_mask = s_att_mask.cuda()
            t_input_ids = t_input_ids.cuda()
            t_att_mask = t_att_mask.cuda()
            neg_s_input_ids = neg_s_input_ids.cuda()
            neg_s_att_mask = neg_s_att_mask.cuda()
            neg_t_input_ids = neg_t_input_ids.cuda()
            neg_t_att_mask = neg_t_att_mask.cuda()

        pooled_s_x, pooled_t_x, pooled_neg_s, pooled_neg_t = \
            self.global_encoder(
                s_input_ids, s_att_mask,
                t_input_ids, t_att_mask,
                neg_s_input_ids, neg_s_att_mask,
                neg_t_input_ids, neg_t_att_mask,
            )

        self.loss = self.decoder(pooled_s_x, pooled_t_x,
                                 pooled_neg_s, pooled_neg_t)
        return self.loss

    def compute_loss(self):
        """Return the loss computed by the most recent ``forward`` call."""
        return self.loss