from turtle import forward
import torch.nn as nn
import math
from basic.SlideWindow import merge_chuncked_reps
from modules.Layer import *
from modules.ScaleMix import *


class SubNet(nn.Module):
    """Project a slice of transformer hidden layers and pool the first token.

    Layers ``[start_layer, bert_layers)`` of the extractor's hidden states are
    each passed through their own ``NonLinear`` projection, combined with a
    learned ``ScalarMix``, and the representation at sequence position 0
    (presumably the [CLS] token — confirm against the tokenizer) is returned.
    """

    def __init__(self, vocab, config, start_layer, bert_layers, layer_num, bert_hidden_size):
        super(SubNet, self).__init__()
        self.config = config
        self.vocab = vocab

        self.start_layer = start_layer
        self.bert_layers = bert_layers
        self.layer_num = layer_num
        # NOTE(review): mutates the shared config object as a side effect;
        # GlobalEncoder assigns the same value before building its SubNets, so
        # this is redundant there, but it is kept for backward compatibility
        # with any caller that relies on it.
        config.bert_hidden_size = bert_hidden_size

        # One projection head per selected transformer layer.
        self.mlp_words = nn.ModuleList([NonLinear(config.bert_hidden_size, config.word_dims, activation=nn.GELU())
                                           for i in range(self.layer_num)])
        self.rescale = ScalarMix(mixture_size=self.layer_num)
        # Orthogonal weights / zero bias for every projection head.
        for mlp in self.mlp_words:
            nn.init.orthogonal_(mlp.linear.weight)
            nn.init.zeros_(mlp.linear.bias)

    def forward(self, hidden_states):
        """Return the pooled first-token representation.

        hidden_states: indexable sequence of per-layer tensors; entries
        ``start_layer`` .. ``bert_layers - 1`` are consumed (their count is
        expected to equal ``layer_num``).
        """
        # Select the configured slice of layers, project each with its own MLP.
        bert_inputs = [hidden_states[idx] for idx in range(self.start_layer, self.bert_layers)]
        proj_hiddens = [self.mlp_words[idx](bert_inputs[idx]) for idx in range(self.layer_num)]
        # Learned scalar mixture of the projected layers.
        x_embeds = self.rescale(proj_hiddens)
        # Pool position 0; assumes x_embeds is (batch, seq, dim) — TODO confirm
        # against ScalarMix's output shape.
        pooled_x = x_embeds[:, 0, :]
        return pooled_x

class GlobalEncoder(nn.Module):
    """Encode source/target texts and their negative samples.

    A single shared ``auto_extractor`` produces transformer hidden states;
    four independent ``SubNet`` pooling heads turn them into fixed-size
    vectors: one each for the source, the target, and their negatives.
    """

    def __init__(self, vocab, config, auto_extractor):
        super(GlobalEncoder, self).__init__()

        self.config = config
        self.auto_extractor = auto_extractor
        #self.drop_emb = nn.Dropout(config.dropout_emb)

        self.start_layer = auto_extractor.start_layer
        self.bert_layers = auto_extractor.bert_layers
        self.layer_num = auto_extractor.layer_num
        config.bert_hidden_size = auto_extractor.auto_model.config.hidden_size

        # Independent pooling heads for source, target, and their negatives.
        self.p_s = SubNet(vocab, config,
                          self.start_layer, self.bert_layers, self.layer_num,
                          config.bert_hidden_size)

        self.p_t = SubNet(vocab, config,
                          self.start_layer, self.bert_layers, self.layer_num,
                          config.bert_hidden_size)

        self.p_neg_s = SubNet(vocab, config,
                              self.start_layer, self.bert_layers, self.layer_num,
                              config.bert_hidden_size)

        self.p_neg_t = SubNet(vocab, config,
                              self.start_layer, self.bert_layers, self.layer_num,
                              config.bert_hidden_size)

    def _pool_negatives(self, subnet, input_ids, att_mask):
        """Encode a (batch, num_neg, seq_len) negative batch and pool it.

        The negatives are flattened to (batch * num_neg, seq_len) for the
        extractor, pooled by *subnet*, then reshaped to (batch, num_neg, dim).
        """
        b, n, t = input_ids.size()
        hidden_states = self.auto_extractor(input_ids.view(-1, t), att_mask.view(-1, t))
        pooled = subnet(hidden_states)
        return pooled.view(b, n, -1)

    def forward(self,
                s_input_ids, s_att_mask,
                t_input_ids, t_att_mask,
                neg_s_input_ids, neg_s_att_mask,
                neg_t_input_ids, neg_t_att_mask
                ):
        """Return pooled representations: (source, target, neg_source, neg_target)."""
        s_hidden_states = self.auto_extractor(s_input_ids, s_att_mask)
        pooled_s = self.p_s(s_hidden_states)

        t_hidden_states = self.auto_extractor(t_input_ids, t_att_mask)
        pooled_t = self.p_t(t_hidden_states)

        # BUG FIX: the original routed the *source* negatives through
        # self.p_neg_t, leaving self.p_neg_s unused and tying both negative
        # batches to a single head; each batch now uses its own head.
        pooled_neg_s = self._pool_negatives(self.p_neg_s, neg_s_input_ids, neg_s_att_mask)
        pooled_neg_t = self._pool_negatives(self.p_neg_t, neg_t_input_ids, neg_t_att_mask)

        return pooled_s, pooled_t, pooled_neg_s, pooled_neg_t