import torch.nn as nn
import torch

from basic.SlideWindow import merge_chuncked_reps


class GlobalEncoder(nn.Module):
    """Encode a chunked document with a pretrained LM and gather EDU-level
    representations from the merged token sequence.

    Documents too long for the PLM are split into ``n_chunks`` windows; this
    module runs the PLM over all windows, merges the per-window outputs back
    into one sequence per document via ``merge_chuncked_reps``, and gathers
    the token positions given by ``edu_offset``.
    """

    def __init__(self, vocab, config, plm_model):
        """
        Args:
            vocab: vocabulary object (kept for interface compatibility;
                not used by this module).
            config: shared experiment config. ``config.bert_hidden_size`` is
                set here as a deliberate side effect so sibling modules built
                from the same config agree on the PLM hidden width.
            plm_model: pretrained transformer exposing ``config.hidden_size``
                and returning an output with ``last_hidden_state``
                (HuggingFace-style interface — confirm against caller).
        """
        super().__init__()

        self.config = config
        self.plm_model = plm_model
        # Publish the PLM width on the shared config (read elsewhere).
        config.bert_hidden_size = plm_model.config.hidden_size

    def forward(self, doc_inputs, edu_offset, b_win):
        """
        Args:
            doc_inputs: dict of tensors shaped ``(batch, n_chunks, chunk_len)``
                (e.g. ``input_ids``, ``attention_mask``); non-tensor entries
                are forwarded unchanged.
            edu_offset: LongTensor ``(batch, n_edus)`` of token positions to
                gather from the merged per-document sequence.
            b_win: sliding-window descriptor consumed by
                ``merge_chuncked_reps`` — semantics defined in
                ``basic.SlideWindow``.

        Returns:
            Tensor ``(batch, n_edus, hidden)`` of EDU representations.
        """
        b, t, w = doc_inputs['input_ids'].size()

        # Flatten (batch, n_chunks) into one axis for the PLM. Build a NEW
        # dict rather than rebinding entries of the caller's ``doc_inputs``
        # in place (the original mutated the argument as a side effect).
        flat_inputs = {
            k: v.view(-1, w) if isinstance(v, torch.Tensor) else v
            for k, v in doc_inputs.items()
        }

        plm_outputs = self.plm_model(**flat_inputs)
        token_reps = plm_outputs.last_hidden_state

        # Stitch the per-chunk outputs back into one sequence per document.
        token_reps = merge_chuncked_reps(token_reps.view(b, t, w, -1), b_win)

        # Only the hidden width is needed here; avoid shadowing b/t with
        # new meanings as the original did.
        hidden = token_reps.size(-1)
        edu_reps = torch.gather(
            input=token_reps,
            index=edu_offset.unsqueeze(-1).repeat(1, 1, hidden),
            dim=1,
        )

        return edu_reps