import os
import sys
sys.path.append("../../")
from tqdm import tqdm
import torch
from copy import deepcopy
from typing import List
from torch import nn
from modeling_bert import BertForSequenceClassification
from modeling_bert import BertModel
from transformers import BertTokenizer
from modeling_distilbert import DistilBertForSequenceClassification
from modeling_distilbert import DistilBertModel
import numpy as np
from torch.nn import functional as F
from transformers import PreTrainedTokenizer
from argparse import Namespace
from sklearn.metrics import accuracy_score

class GradientReversal(torch.autograd.Function):
    """Gradient-reversal layer.

    Acts as the identity on the forward pass; on the backward pass the
    incoming gradient is negated and scaled by the class-level ``lambd``.
    """
    lambd = 1.0

    @staticmethod
    def forward(ctx, x):
        # Identity: the input passes through untouched.
        return x

    @staticmethod
    def backward(ctx, grad_output):
        # Flip the gradient's sign, scaled by lambd.
        return -GradientReversal.lambd * grad_output


class GANBert(nn.Module):
    """BERT encoder paired with an adversarial noise generator.

    ``gan_layer`` produces a per-token noise vector that is added to the token
    embeddings before encoding; the gradient-reversal layer makes the
    generator *maximize* the contrastive loss between clean and perturbed
    sentence vectors while the encoder minimizes it.
    """

    def __init__(self, bert: "BertModel", tokenizer: "BertTokenizer", device: torch.device):
        super(GANBert, self).__init__()
        self.bert = bert            # full BertModel: embeddings + encoder
        self.tokenizer = tokenizer
        hidden_size = bert.config.hidden_size
        self.device = device
        # Noise generator ("G" of the GAN): hidden -> 2*hidden -> hidden.
        self.gan_layer = nn.Sequential(nn.Linear(hidden_size, 2 * hidden_size),
                                       nn.GELU(),
                                       nn.Linear(2 * hidden_size, hidden_size))

    def sents2ids(self, text, text_pair=None):
        """Tokenize ``text`` (optionally with ``text_pair``) into Python lists.

        Returns (input_ids, masks) or (input_ids, masks, token_type_ids).
        NOTE(review): ``encode`` receives max_length but no truncation=True;
        recent transformers versions only warn and do NOT truncate — confirm
        the intended behaviour before relying on the length cap.
        """
        # Older tokenizers expose `max_len`, newer ones `model_max_length`.
        max_len = self.tokenizer.max_len if hasattr(self.tokenizer, 'max_len') else self.tokenizer.model_max_length
        input_ids = [self.tokenizer.encode(t, add_special_tokens=True, max_length=max_len) for t in text]
        if text_pair is None:
            masks = [[1] * len(i) for i in input_ids]
            return input_ids, masks
        else:
            assert len(text) == len(text_pair)
            # The second segment gets whatever room the first one left over.
            pair_ids = [self.tokenizer.encode(t, add_special_tokens=True, max_length=max(0, max_len - len(input_ids[idx])))
                        for idx, t in enumerate(text_pair)]
            # p_ids[1:] drops the duplicated leading special token of segment B.
            token_type_ids = [[0] * len(ids) + [1] * len(p_ids[1:]) for ids, p_ids in zip(input_ids, pair_ids)]
            input_ids = [ids + p_ids[1:] for ids, p_ids in zip(input_ids, pair_ids)]
            masks = [[1] * len(i) for i in input_ids]
            return input_ids, masks, token_type_ids

    def ids2tensors(self, input_ids, masks, seg_ids=None):
        """Zero-pad ragged id/mask (and optional segment-id) lists to the
        batch-max length and return them as LongTensors on ``self.device``."""
        max_length = max(len(i) for i in input_ids)
        input_ids = [(i + [0] * (max_length - len(i))) for i in input_ids]
        masks = [(m + [0] * (max_length - len(m))) for m in masks]
        assert all(len(i) == max_length for i in input_ids)
        assert all(len(m) == max_length for m in masks)
        if seg_ids is None:
            return torch.tensor(input_ids, device=self.device), \
                   torch.tensor(masks, device=self.device)
        else:
            seg_ids = [(s + [0] * (max_length - len(s))) for s in seg_ids]
            assert all(len(s) == max_length for s in seg_ids)
            return torch.tensor(input_ids, device=self.device), \
                   torch.tensor(masks, device=self.device), \
                   torch.tensor(seg_ids, device=self.device)

    def sents2tensors(self, sents):
        """Raw sentences -> (input_ids, attention_mask) tensors."""
        input_ids, masks = self.sents2ids(sents)
        inputTensor, maskTensor = self.ids2tensors(input_ids, masks)
        return inputTensor, maskTensor

    def initConfig(self,
                   input_ids=None,
                   attention_mask=None,
                   token_type_ids=None,
                   head_mask=None,
                   inputs_embeds=None,
                   encoder_hidden_states=None,
                   encoder_attention_mask=None,
                   past_key_values=None,
                   use_cache=None,
                   output_attentions=None,
                   output_hidden_states=None,
                   return_dict=None
        ):
        """Resolve the encoder-call configuration exactly the way
        ``BertModel.forward`` does: fill defaults from the config, broadcast
        the attention mask, and expand the head mask."""
        output_attentions = output_attentions if output_attentions is not None else self.bert.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.bert.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.bert.config.use_return_dict

        if self.bert.config.is_decoder:
            use_cache = use_cache if use_cache is not None else self.bert.config.use_cache
        else:
            use_cache = False

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            batch_size, seq_length = input_shape
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size, seq_length = input_shape
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        # past_key_values_length
        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0

        if attention_mask is None:
            attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)

        if token_type_ids is None:
            if hasattr(self.bert.embeddings, "token_type_ids"):
                buffered_token_type_ids = self.bert.embeddings.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions
        # [batch_size, from_seq_length, to_seq_length] ourselves, in which case
        # we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.bert.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.bert.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.bert.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # head_mask: [num_heads] or [num_hidden_layers x num_heads] expanded to
        # [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.bert.get_head_mask(head_mask, self.bert.config.num_hidden_layers)
        return (
                   token_type_ids,
                   extended_attention_mask,
                   head_mask,
                   encoder_hidden_states,
                   encoder_extended_attention_mask,
                   past_key_values,
                   use_cache,
                   output_attentions,
                   output_hidden_states,
                   return_dict
        )

    def embedding2vecs(self,
                       embedding_output,
                       extended_attention_mask,
                       head_mask,
                       encoder_hidden_states,
                       encoder_extended_attention_mask,
                       past_key_values,
                       use_cache,
                       output_attentions,
                       output_hidden_states,
                       return_dict
        ):
        """Run the BERT encoder over ``embedding_output`` and mean-pool the
        last hidden states over the sequence dimension."""
        encoder_outputs = self.bert.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = encoder_outputs.last_hidden_state  # [batchsize, sent_len, hidden]
        return hidden_states.mean(dim=1)

    def _prepare(self, inputTensor, maskTensor):
        """Shared setup for forward()/perturbation(): resolve the encoder
        configuration and compute the (unperturbed) embedding output.

        BUGFIX: this used to call ``self.embeddings`` (an attribute GANBert
        never defines — AttributeError at runtime) and passed
        ``past_key_values_length=None`` where the embeddings expect an int.
        """
        (token_type_ids, extended_attention_mask, head_mask, encoder_hidden_states,
         encoder_extended_attention_mask, past_key_values, use_cache,
         output_attentions, output_hidden_states, return_dict) = self.initConfig(
            input_ids=inputTensor,
            attention_mask=maskTensor,
        )
        embedding_output = self.bert.embeddings(
            input_ids=inputTensor,
            position_ids=None,
            token_type_ids=token_type_ids,
            inputs_embeds=None,
            past_key_values_length=0,
        )
        cfg = (extended_attention_mask, head_mask, encoder_hidden_states,
               encoder_extended_attention_mask, past_key_values, use_cache,
               output_attentions, output_hidden_states, return_dict)
        return embedding_output, cfg

    def forward(self, sents):
        """Mean-pooled encoder representations for raw sentences."""
        inputTensor, maskTensor = self.sents2tensors(sents)
        embedding_output, cfg = self._prepare(inputTensor, maskTensor)
        return self.embedding2vecs(embedding_output, *cfg)

    def perturbation(self, sents):
        """Like forward(), but adds a small adversarial perturbation
        (unit-normalised per token, scaled by 5e-3) to the embeddings.

        Gradient reversal makes ``gan_layer`` ascend the downstream loss.
        NOTE(review): the per-token norm can be zero, which would divide by
        zero here — confirm whether an epsilon should be added.
        """
        inputTensor, maskTensor = self.sents2tensors(sents)
        embedding_output, cfg = self._prepare(inputTensor, maskTensor)
        noise = self.gan_layer(embedding_output)
        norm = noise.norm(dim=-1)
        noise_norm = noise / (norm.unsqueeze(dim=-1))  # unit norm per token
        adver_noise = GradientReversal.apply(noise_norm)
        return self.embedding2vecs(embedding_output + 5e-3 * adver_noise, *cfg)

    def contrastiveLoss(self, sents, temperature=0.1):
        """InfoNCE-style loss pairing each clean sentence vector with its own
        perturbed version (diagonal targets)."""
        vecs1 = self.forward(sents)
        vecs2 = self.perturbation(sents)
        # Outer product of norms (+eps) turns dot products into cosines.
        norm_mtx = torch.matmul(vecs1.norm(dim=1).unsqueeze(-1),
                                vecs2.norm(dim=1).unsqueeze(0)) \
                   + torch.ones([len(vecs1), len(vecs2)], device=vecs2.device) * 1e-8
        cosine = torch.matmul(vecs1, vecs2.T) / norm_mtx
        similarity = F.softmax(cosine / temperature, dim=1)
        # Nudge away from exact zeros before log().
        epsilon = torch.ones_like(similarity) * 1e-8
        similarity = (similarity - epsilon).abs()
        return F.nll_loss(similarity.log(),
                          torch.arange(len(sents), device=similarity.device))

    def optimG(self, sents, max_step=10):
        """Train only the noise generator for ``max_step`` Adam steps.

        BUGFIX: the progress message used %-style placeholders with
        ``str.format`` (which printed the raw template); now formatted with %.
        """
        optim = torch.optim.Adam([
            {'params': self.gan_layer.parameters(), 'lr': 5e-1}
        ])
        for step in range(max_step):
            loss = self.contrastiveLoss(sents, temperature=1.0)
            optim.zero_grad()
            loss.backward()
            optim.step()
            print("####Generator Training####: %3d | %3d , contrastive loss = %6.8f" % (
                step, max_step, loss.data.item()
            ))


class VanillaBert(nn.Module):
    """
    A really basic wrapper around BERT: owns the device placement and exposes
    pooled/mean representations plus loss/accuracy helpers.
    """
    def __init__(self, bert: "BertForSequenceClassification", **kwargs):
        super(VanillaBert, self).__init__()
        # Default to the first GPU when available, else CPU.
        self.__device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
        self.bert = bert.to(self.device)

    @property
    def device(self):
        """Device all computation runs on (read-only; use set_device)."""
        return self.__device

    def set_device(self, new_device):
        """Move the wrapped model to ``new_device`` and remember it."""
        self.__device = new_device
        self.bert = self.bert.to(self.__device)

    def DataParallel(self):
        # Delegates to the (project-specific) model's own DataParallel hook.
        self.bert.DataParallel()

    def _move(self, input_ids, token_ids, attention_mask):
        """Ensure all three input tensors live on ``self.__device``.

        BUGFIX: the original code moved the tensors into throw-away local
        names and then used the *unmoved* originals, crashing with a device
        mismatch whenever inputs arrived on another device.
        """
        if input_ids.device != self.__device:
            input_ids = input_ids.to(self.__device)
            token_ids = token_ids.to(self.__device)
            attention_mask = attention_mask.to(self.__device)
        return input_ids, token_ids, attention_mask

    def Sents2Vecs(self, input_ids: torch.LongTensor,
                         token_ids: torch.LongTensor,
                         attention_mask: torch.LongTensor):
        """Mean-pooled last-layer hidden states, shape [batch, hidden]."""
        input_ids, token_ids, attention_mask = self._move(input_ids, token_ids, attention_mask)
        encoder_dict = self.bert.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_ids
        )
        hidden_states = encoder_dict.last_hidden_state  # [batchsize, sent_len, hidden]
        return hidden_states.mean(dim=1)

    def PoolerVecs(self, input_ids: torch.LongTensor,
                         token_ids: torch.LongTensor,
                         attention_mask: torch.LongTensor):
        """BERT pooler output ([CLS] through the pooler), shape [batch, hidden]."""
        input_ids, token_ids, attention_mask = self._move(input_ids, token_ids, attention_mask)
        encoder_dict = self.bert.bert.forward(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_ids
        )
        return encoder_dict.pooler_output

    def forward(self, input_ids: torch.LongTensor, token_ids: torch.LongTensor,
            attention_mask: torch.LongTensor, domains: torch.LongTensor = None,
            labels: torch.LongTensor = None):
        """Run the full sequence-classification model (``domains`` is unused,
        kept for interface compatibility with the other wrappers)."""
        input_ids, token_ids, attention_mask = self._move(input_ids, token_ids, attention_mask)
        return self.bert(input_ids, token_type_ids=token_ids, labels=labels,
                        attention_mask=attention_mask)

    def predict(self, batch, temperature=1.0):
        """Temperature-scaled softmax class probabilities for a
        (input_ids, masks, seg_ids, ...) batch tuple."""
        input_ids, masks, seg_ids = batch[0].to(self.device), \
                                    batch[1].to(self.device), \
                                    batch[2].to(self.device)
        pooled_output = self.PoolerVecs(input_ids, token_ids=seg_ids, attention_mask=masks)
        pooled_output = self.bert.dropout(pooled_output)
        logits = self.bert.classifier(pooled_output)
        return F.softmax(logits / temperature, dim=1)

    def lossAndAcc(self, batch, temperature=1.0):
        """NLL loss + accuracy for a batch whose labels sit at batch[3]
        (either class indices or one-hot rows)."""
        preds = self.predict(batch, temperature=temperature)
        # Nudge away from exact zeros before log().
        epsilon = torch.ones_like(preds) * (1e-8)
        preds = (preds - epsilon).abs()
        label = batch[3].to(self.device) if batch[3].dim() == 1 else batch[3].to(self.device).argmax(dim=1)
        loss = F.nll_loss(preds.log(), label)
        acc = accuracy_score(label.cpu(), preds.data.argmax(dim=1).cpu())
        return loss, acc


class DomainAdversarialBert(nn.Module):
    """
    BERT classifier with an adversarial domain discriminator attached (via
    gradient reversal) to the [CLS] vector of one hidden layer.
    """
    def __init__(self, bert: BertModel, n_domains: int, n_classes: int = 2, supervision_layer=12, **kwargs):
        super(DomainAdversarialBert, self).__init__()
        self.bert = bert
        # Predicts the domain from the supervised layer's [CLS] vector.
        self.domain_classifier = nn.Linear(bert.config.hidden_size, n_domains)
        self.supervision_layer = supervision_layer

    def forward(
            self,
            input_ids: torch.LongTensor,
            attention_mask: torch.LongTensor,
            domains: torch.LongTensor = None,
            labels: torch.LongTensor = None):
        """Return (classifier_logits,), with a combined loss prepended when
        domain and/or class labels are supplied."""
        bert_out = self.bert(
            input_ids,
            attention_mask=attention_mask
        )
        # (b x n_classes)
        classifier_logits = bert_out[0]

        # NOTE(review): min(1, ...) yields 0 or a negative divisor when the
        # supervision layer index reaches len(hidden_states); max() may have
        # been intended here — behaviour kept as-is pending confirmation.
        divisor = min(1, 2 * (len(bert_out[1]) - self.supervision_layer))
        # [CLS] representation of the supervised layer, gradient-reversed.
        cls_at_layer = bert_out[1][self.supervision_layer][:, 0, :]
        adv_logits = self.domain_classifier(GradientReversal.apply(cls_at_layer))

        result = (classifier_logits,)
        xent = nn.CrossEntropyLoss()
        if domains is not None:
            # Scale the adversarial loss depending on how deep in the network it is
            total = (1e-3 / divisor) * xent(adv_logits, domains)
            if labels is not None:
                total = total + xent(classifier_logits, labels)
            result = (total,) + result
        elif labels is not None:
            result = (xent(classifier_logits, labels),) + result

        return result

########################################
# Multi-Transformer Network
########################################

class TransformerNetwork(nn.Module):
    """
    A single transformer encoder over BERT token embeddings plus a linear
    classifier on the first-token representation.
    """

    def __init__(
            self,
            bert_embeddings,
            ff_dim: int = 2048,
            d_model: int = 768,
            n_domains: int = 2,
            n_layers: int = 6,
            n_classes: int = 2,
            n_heads: int = 6,
    ):
        super(TransformerNetwork, self).__init__()
        self.ff_dim = ff_dim
        self.d_model = d_model
        self.n_domains = n_domains

        # Callable producing embeddings: bert_embeddings(input_ids=...) -> (b, s, d_model)
        self.bert_embeddings = bert_embeddings

        self.xformer = nn.TransformerEncoder(
            nn.TransformerEncoderLayer(
                d_model,
                n_heads,
                dim_feedforward=ff_dim
            ),
            n_layers
        )

        # final classifier layers (d_model x n_classes)
        self.classifier = nn.Linear(d_model, n_classes)

    def forward(
            self,
            input_ids: torch.LongTensor,
            attention_mask: torch.LongTensor,
            domains: torch.LongTensor = None,
            labels: torch.LongTensor = None
    ):
        """Return (loss, logits) when labels are given, else (logits,).

        BUGFIX: the loss was previously computed unconditionally, crashing
        whenever the declared default labels=None was used; now it is only
        prepended when labels are supplied (matching TransformerClassifier).
        ``domains`` is unused here; kept for interface parity.
        """
        embs = self.bert_embeddings(input_ids=input_ids)

        # Sequence length first for nn.Transformer
        inputs = embs.permute(1, 0, 2)
        # Flags the 0s instead of 1s (True marks padding)
        masks = attention_mask == 0

        output = self.xformer(inputs, src_key_padding_mask=masks)
        pooled_output = output[0]  # first-token representation
        logits = self.classifier(pooled_output)
        outputs = (logits,)
        if labels is not None:
            loss = nn.CrossEntropyLoss()(logits, labels)
            outputs = (loss,) + outputs
        return outputs


class TransformerClassifier(nn.Module):
    """
    A single shared transformer encoder over BERT token embeddings plus a
    linear classifier on the first-token representation.
    """

    def __init__(
            self,
            bert_embeddings,
            ff_dim: int = 2048,
            d_model: int = 768,
            n_layers: int = 6,
            n_heads: int = 6,
            n_classes: int = 2,
            **kwargs
    ):
        super(TransformerClassifier, self).__init__()
        self.ff_dim = ff_dim
        self.d_model = d_model
        self.bert_embeddings = bert_embeddings

        encoder_layer = nn.TransformerEncoderLayer(d_model, n_heads, dim_feedforward=ff_dim)
        self.xformer = nn.TransformerEncoder(encoder_layer, n_layers)

        # Maps the first-token vector (d_model) onto class logits (n_classes).
        self.classifier = nn.Linear(d_model, n_classes)

    def forward(
            self,
            input_ids: torch.LongTensor,
            attention_mask: torch.LongTensor,
            domains: torch.LongTensor = None,
            labels: torch.LongTensor = None
    ):
        """Return (logits,) or, when labels are supplied, (loss, logits)."""
        token_embs = self.bert_embeddings(input_ids=input_ids)
        # nn.Transformer expects the sequence dimension first.
        seq_first = token_embs.permute(1, 0, 2)
        # Padding positions are the zeros of the attention mask.
        pad_mask = attention_mask == 0

        encoded = self.xformer(seq_first, src_key_padding_mask=pad_mask)
        logits = self.classifier(encoded[0])
        if labels is None:
            return (logits,)
        loss = nn.CrossEntropyLoss()(logits, labels)
        return (loss, logits)

class MultiTransformer(nn.Module):
    """
    One independent transformer encoder per domain over shared BERT token
    embeddings; returns first-token representations.
    """

    def __init__(
            self,
            bert_embeddings,
            ff_dim: int = 2048,
            d_model: int = 768,
            n_domains: int = 2,
            n_layers: int = 6,
            n_heads: int = 6,
    ):
        super(MultiTransformer, self).__init__()
        self.ff_dim = ff_dim
        self.d_model = d_model
        self.n_domains = n_domains
        self.bert_embeddings = bert_embeddings

        # Independent encoder stack per domain.
        self.xformer = nn.ModuleList(
            nn.TransformerEncoder(
                nn.TransformerEncoderLayer(d_model, n_heads, dim_feedforward=ff_dim),
                n_layers,
            )
            for _ in range(n_domains)
        )

    def forward(
            self,
            input_ids: torch.LongTensor,
            attention_mask: torch.LongTensor,
            domains: torch.LongTensor = None
    ):
        """First-token vector from the selected domain's encoder, or a list of
        vectors (one per domain) when ``domains`` is None."""
        token_embs = self.bert_embeddings(input_ids=input_ids)
        # nn.Transformer expects the sequence dimension first.
        seq_first = token_embs.permute(1, 0, 2)
        # Padding positions are the zeros of the attention mask.
        pad_mask = attention_mask == 0

        if domains is None:
            return [
                self.xformer[idx](seq_first, src_key_padding_mask=pad_mask)[0]
                for idx in range(self.n_domains)
            ]
        encoder = self.xformer[domains[0]]
        return encoder(seq_first, src_key_padding_mask=pad_mask)[0]


class MultiTransformerClassifier(nn.Module):
    """
    One transformer + linear classifier per domain; a soft-vote ensemble over
    all domains is used when no domain id is supplied.
    """

    def __init__(
            self,
            bert_embeddings,
            ff_dim: int = 2048,
            d_model: int = 768,
            n_domains: int = 2,
            n_layers: int = 6,
            n_classes: int = 2,
            n_heads: int = 6,
    ):
        super(MultiTransformerClassifier, self).__init__()
        self.ff_dim = ff_dim
        self.d_model = d_model
        self.n_domains = n_domains

        self.multi_xformer = MultiTransformer(
            bert_embeddings,
            ff_dim=ff_dim,
            d_model=d_model,
            n_domains=n_domains,
            n_layers=n_layers,
            n_heads=n_heads
        )

        # final classifier layers (d_model x n_classes), one per domain
        self.classifier = nn.ModuleList([nn.Linear(d_model, n_classes) for d in range(n_domains)])

    def forward(
            self,
            input_ids: torch.LongTensor,
            attention_mask: torch.LongTensor,
            domains: torch.LongTensor = None,
            labels: torch.LongTensor = None
    ):
        """Single-domain path when ``domains`` is given, else soft-vote
        ensemble over all domains.

        Returns (loss, logits) when labels are provided, else (logits,).
        BUGFIX: both branches previously computed the loss even when labels
        was None (the declared default), raising at runtime.
        """
        loss_fn = nn.CrossEntropyLoss()
        if domains is not None:
            domain = domains[0]
            pooled_output = self.multi_xformer(input_ids, attention_mask, domains)
            logits = self.classifier[domain](pooled_output)
            if labels is not None:
                return (loss_fn(logits, labels), logits)
            return (logits,)

        pooled_outputs = self.multi_xformer(input_ids, attention_mask)
        logits_all = [self.classifier[d](po) for d, po in enumerate(pooled_outputs)]
        # b x ndom x n_classes
        votes = torch.stack(logits_all, dim=1)
        self.votes = votes  # stashed for inspection by callers
        # Normalize with softmax, then average the per-domain distributions.
        logits = torch.mean(torch.nn.Softmax(dim=-1)(votes), dim=1)
        if labels is not None:
            loss = torch.mean(torch.stack([loss_fn(lg, labels) for lg in logits_all]))
            return loss, logits
        return (logits,)


class MultiDistilBert(nn.Module):
    """
    One DistilBERT encoder per domain; returns the first-token ([CLS])
    representation of the selected domain, or of every domain.
    """

    def __init__(
            self,
            model_name,
            config,
            n_domains: int = 2,
            init_weights: List = None
    ):
        super(MultiDistilBert, self).__init__()
        # Independently fine-tunable DistilBERT per domain.
        self.models = nn.ModuleList(
            DistilBertModel.from_pretrained(model_name, config=config)
            for _ in range(n_domains)
        )
        if init_weights is not None:
            first_key = next(iter(init_weights))
            if 'distilbert' in first_key:
                # Strip the leading "distilbert." prefix from checkpoint keys.
                init_weights = {key[11:]: tensor for key, tensor in init_weights.items()}
            for sub_model in self.models:
                merged = sub_model.state_dict()
                merged.update(deepcopy(init_weights))
                sub_model.load_state_dict(merged)
        self.n_domains = n_domains
        self.d_model = config.hidden_size

    def forward(
            self,
            input_ids: torch.LongTensor,
            attention_mask: torch.LongTensor,
            domains: torch.LongTensor = None,
            labels: torch.LongTensor = None
    ):
        """[CLS] vector from the selected domain's model, or a list of [CLS]
        vectors (one per domain) when ``domains`` is None. ``labels`` is
        unused; kept for interface parity with the other wrappers."""
        if domains is None:
            cls_vectors = []
            for idx in range(self.n_domains):
                hidden = self.models[idx](input_ids, attention_mask=attention_mask)[0]
                cls_vectors.append(hidden[:, 0, :])
            return cls_vectors
        selected = self.models[domains[0]]
        hidden = selected(input_ids, attention_mask=attention_mask)[0]
        return hidden[:, 0, :]


class MultiDistilBertClassifier(nn.Module):
    """
    One DistilBERT + linear classifier per domain; a soft-vote ensemble over
    all domains is used when no domain id is supplied.
    """

    def __init__(
            self,
            model_name,
            config,
            n_domains: int = 2,
            n_classes: int = 2,
            init_weights: List = None
    ):
        super(MultiDistilBertClassifier, self).__init__()
        self.multi_xformer = MultiDistilBert(model_name, config, n_domains=n_domains, init_weights=init_weights)
        self.n_domains = n_domains
        self.d_model = config.hidden_size
        # final classifier layers (d_model x n_classes), one per domain
        self.classifier = nn.ModuleList([nn.Linear(config.hidden_size, n_classes) for d in range(n_domains)])

    def forward(
            self,
            input_ids: torch.LongTensor,
            attention_mask: torch.LongTensor,
            domains: torch.LongTensor = None,
            labels: torch.LongTensor = None
    ):
        """Single-domain path when ``domains`` is given, else soft-vote
        ensemble over all domains.

        Returns (loss, logits) when labels are provided, else (logits,).
        BUGFIX: both branches previously computed the loss even when labels
        was None (the declared default), raising at runtime.
        """
        loss_fn = nn.CrossEntropyLoss()
        if domains is not None:
            domain = domains[0]
            pooled = self.multi_xformer(input_ids, attention_mask=attention_mask, domains=domains)
            logits = self.classifier[domain](pooled)
            if labels is not None:
                return (loss_fn(logits, labels), logits)
            return (logits,)

        pooled_outputs = self.multi_xformer(input_ids, attention_mask=attention_mask)
        logits_all = [self.classifier[d](po) for d, po in enumerate(pooled_outputs)]
        # b x ndom x n_classes
        votes = torch.stack(logits_all, dim=1)
        self.votes = votes  # stashed for inspection by callers
        # Normalize with softmax, then average the per-domain distributions.
        logits = torch.mean(torch.nn.Softmax(dim=-1)(votes), dim=1)
        if labels is not None:
            loss = torch.mean(torch.stack([loss_fn(lg, labels) for lg in logits_all]))
            return loss, logits
        return (logits,)


class MultiTransformerNetwork(nn.Module):
    """
    Per-domain transformer encoders with per-domain classifiers; a soft-vote
    ensemble over all domains is used when no domain id is supplied.
    """

    def __init__(
            self,
            bert_embeddings,
            ff_dim: int = 2048,
            d_model: int = 768,
            n_domains: int = 2,
            n_layers: int = 6,
            n_classes: int = 2,
            n_heads: int = 6,
    ):
        super(MultiTransformerNetwork, self).__init__()
        self.ff_dim = ff_dim
        self.d_model = d_model
        self.n_domains = n_domains

        # Callable producing embeddings: bert_embeddings(input_ids=...) -> (b, s, d_model)
        self.bert_embeddings = bert_embeddings

        # One encoder stack per domain.
        self.xformer = nn.ModuleList([nn.TransformerEncoder(
            nn.TransformerEncoderLayer(
                d_model,
                n_heads,
                dim_feedforward=ff_dim
            ),
            n_layers
        ) for d in range(n_domains)])

        # final classifier layers (d_model x n_classes), one per domain
        self.classifier = nn.ModuleList([nn.Linear(d_model, n_classes) for d in range(n_domains)])

    def forward(
            self,
            input_ids: torch.LongTensor,
            attention_mask: torch.LongTensor,
            domains: torch.LongTensor = None,
            labels: torch.LongTensor = None
    ):
        """Returns (loss, logits) when labels are given, else (logits,).

        BUGFIXES: (1) the ensemble branch dropped src_key_padding_mask, so
        padding tokens leaked into attention — now passed exactly like the
        single-domain branch; (2) the loss is only computed when labels are
        supplied (previously crashed on the declared default labels=None).
        NOTE(review): ensemble logits are summed (torch.sum) over domains
        while MultiTransformerClassifier averages; argmax is unaffected, so
        the original sum is kept — confirm which callers rely on the scale.
        """
        embs = self.bert_embeddings(input_ids=input_ids)

        # Sequence length first for nn.Transformer
        inputs = embs.permute(1, 0, 2)
        # Flags the 0s instead of 1s (True marks padding)
        masks = attention_mask == 0

        loss_fn = nn.CrossEntropyLoss()
        if domains is not None:
            domain = domains[0]
            output = self.xformer[domain](inputs, src_key_padding_mask=masks)
            logits = self.classifier[domain](output[0])
            if labels is not None:
                return (loss_fn(logits, labels), logits)
            return (logits,)

        logits_all = []
        for d in range(self.n_domains):
            output = self.xformer[d](inputs, src_key_padding_mask=masks)
            logits_all.append(self.classifier[d](output[0]))
        # b x ndom x n_classes
        votes = torch.stack(logits_all, dim=1)
        self.votes = votes  # stashed for inspection by callers
        # Normalize with softmax, then combine across domains.
        logits = torch.sum(torch.nn.Softmax(dim=-1)(votes), dim=1)
        if labels is not None:
            loss = torch.mean(torch.stack([loss_fn(lg, labels) for lg in logits_all]))
            return loss, logits
        return (logits,)


#############################
# Multi-View domain adaptation modules
#############################
class MultiViewTransformerNetwork(nn.Module):
    """
    Multi-view transformer network for domain adaptation.

    Combines the pooled outputs of per-domain transformer experts with a
    shared BERT encoder via a single-head scaled dot-product attention, where
    the shared representation provides the query and every view (experts +
    shared) provides keys/values.  A single linear layer classifies the
    attended representation.
    """
    def __init__(self, multi_xformer: MultiTransformerNetwork, shared_bert: VanillaBert, n_classes: int = 2):
        super(MultiViewTransformerNetwork, self).__init__()

        # Per-domain expert encoders (assumed to return a list of pooled
        # outputs, one per domain — see forward())
        self.multi_xformer = multi_xformer.multi_xformer
        # Underlying BertModel of the shared classifier wrapper
        self.shared_bert = shared_bert.bert.bert

        self.d_model = self.multi_xformer.d_model
        # sqrt-scale denominator kept as a frozen Parameter so it follows
        # the module across .to(device) calls
        self.dim_param = nn.Parameter(torch.FloatTensor([self.multi_xformer.d_model]), requires_grad=False)
        self.n_domains = self.multi_xformer.n_domains
        self.n_classes = n_classes

        # Query matrix
        self.Q = nn.Parameter(torch.randn((self.multi_xformer.d_model, self.multi_xformer.d_model)), requires_grad=True)
        # Key matrix
        self.K = nn.Parameter(torch.randn((self.multi_xformer.d_model, self.multi_xformer.d_model)), requires_grad=True)
        # Value matrix
        self.V = nn.Parameter(torch.randn((self.multi_xformer.d_model, self.multi_xformer.d_model)), requires_grad=True)

        # Main classifier
        self.task_classifier = nn.Linear(self.multi_xformer.d_model, n_classes)
        # TODO: Introduce aux tasks if needed

        nn.init.xavier_uniform_(self.Q)
        nn.init.xavier_uniform_(self.K)
        nn.init.xavier_uniform_(self.V)

    def forward(
            self,
            input_ids: torch.LongTensor,
            attention_mask: torch.LongTensor,
            domains: torch.LongTensor = None,
            labels: torch.LongTensor = None,
            ret_alpha: bool = False
    ):
        """
        Returns ``(logits,)``, prepending the cross-entropy loss when
        ``labels`` is given, and appending the attention weights when
        ``ret_alpha`` is True.  ``domains`` is accepted but unused here.
        """
        # Get all the pooled outputs
        # (n_domain) b x dim
        pooled_outputs = self.multi_xformer(input_ids, attention_mask)
        # b x dim
        # index [1] is the pooled ([CLS]) output of BertModel
        shared_output = self.shared_bert(input_ids, attention_mask=attention_mask)[1]
        # Values b x n_domain + 1 x dim
        v = torch.stack(pooled_outputs + [shared_output], dim=1)
        # Queries b x dim
        q = shared_output @ self.Q
        # Keys b*(n_domain + 1) x dim
        k = v.view(-1, self.d_model) @ self.K
        # Attention (scaled dot product) b x (n_domain + 1)
        attn = torch.sum(k.view(-1, self.n_domains + 1, self.d_model) * q.unsqueeze(1), dim=-1) / torch.sqrt(self.dim_param)
        attn = nn.Softmax(dim=-1)(attn)
        v = v.view(-1, self.d_model) @ self.V
        v = v.view(-1, self.n_domains + 1, self.d_model)
        # Attend to the values b x dim
        o = torch.sum(attn.unsqueeze(-1) * v, dim=1)

        # Classifier
        logits = self.task_classifier(o)
        outputs = (logits,)
        if labels is not None:
            # Loss
            loss_fn = nn.CrossEntropyLoss()
            loss = loss_fn(logits, labels)
            outputs = (loss,) + outputs
        if ret_alpha:
            outputs += (attn,)
        return outputs


class MultiViewTransformerNetworkAveraging(nn.Module):
    """
    Multi-view transformer network for domain adaptation.

    Ensembles the per-domain expert classifiers and the shared BERT
    classifier by uniformly averaging their softmax distributions.  During
    training on a known domain, the in-domain expert is excluded from the
    average and instead receives direct cross-entropy supervision.
    """

    def __init__(self, multi_xformer: MultiTransformerClassifier, shared_bert: VanillaBert, n_classes: int = 2):
        super(MultiViewTransformerNetworkAveraging, self).__init__()

        # Per-domain expert encoders and their classification heads
        self.multi_xformer = multi_xformer.multi_xformer
        self.multi_xformer_classifiers = multi_xformer.classifier
        # Shared sequence-classification model
        self.shared_bert = shared_bert.bert

        self.d_model = multi_xformer.d_model
        # Frozen parameter so it follows the module across devices
        self.dim_param = nn.Parameter(torch.FloatTensor([multi_xformer.d_model]), requires_grad=False)
        self.n_domains = multi_xformer.n_domains
        self.n_classes = n_classes

    def forward(
            self,
            input_ids: torch.LongTensor,
            attention_mask: torch.LongTensor,
            domains: torch.LongTensor = None,
            labels: torch.LongTensor = None,
            ret_alpha: bool = False
    ):
        """
        Returns ``(preds,)`` — the averaged class distribution — prepending
        the loss when ``labels`` is given and appending the (constant)
        ensembling weights when ``ret_alpha`` is True.
        """
        # Get all the pooled outputs
        # (n_domain) b x dim
        pooled_outputs = self.multi_xformer(input_ids, attention_mask)
        # b x dim
        outputs = self.shared_bert(input_ids, attention_mask=attention_mask)
        logits_shared = outputs[0]

        # Explicit dim: bare nn.Softmax() is deprecated and relies on implicit
        # dim inference; logits are (batch, n_classes), normalize the last dim.
        softmax = nn.Softmax(dim=-1)
        logits_private = [self.multi_xformer_classifiers[d](pooled_outputs[d]) for d in range(self.n_domains)]

        if domains is not None and self.training:
            # Exclude the in-domain expert from the ensemble while training;
            # it gets its own strong supervision term below.
            logits = [l for j, l in enumerate(logits_private) if j != domains[0]] + [logits_shared]
        else:
            logits = logits_private + [logits_shared]
        # Uniform averaging weight
        attn = 1 / len(logits)

        # b x n_dom(+1) x nclasses
        preds = torch.stack([softmax(logs) for logs in logits], dim=1)
        # Apply attention
        preds = torch.sum(preds * attn, dim=1)
        outputs = (preds,)
        if labels is not None:
            # LogSoftmax + NLLLoss
            loss_fn = nn.NLLLoss()
            loss = (0.5) * loss_fn(torch.log(preds), labels)
            # Strong supervision on in domain
            if domains is not None:
                domain = domains[0]
                domain_logits = logits_private[domain]
                xent = nn.CrossEntropyLoss()
                loss += (0.5) * xent(domain_logits, labels)

            outputs = (loss,) + outputs
        if ret_alpha:
            # Device-agnostic replacement for torch.cuda.FloatTensor (which
            # crashed on CPU-only runs); attn is a plain Python float here.
            outputs += (torch.full_like(preds, attn),)
        return outputs


class MultiViewTransformerNetworkLearnedAveraging(nn.Module):
    """
    Multi-view transformer network for domain adaptation.

    Like the uniform-averaging variant, but the ensembling weights over
    (experts + shared model) are learned parameters (``alpha_params``),
    softmax-normalized at each forward pass.
    """

    def __init__(self, multi_xformer: MultiTransformerClassifier, shared_bert: VanillaBert, n_classes: int = 2):
        super(MultiViewTransformerNetworkLearnedAveraging, self).__init__()

        # Per-domain expert encoders and their classification heads
        self.multi_xformer = multi_xformer.multi_xformer
        self.multi_xformer_classifiers = multi_xformer.classifier
        # Shared sequence-classification model
        self.shared_bert = shared_bert.bert

        self.d_model = multi_xformer.d_model
        # Frozen parameter so it follows the module across devices
        self.dim_param = nn.Parameter(torch.FloatTensor([multi_xformer.d_model]), requires_grad=False)
        self.n_domains = multi_xformer.n_domains
        self.n_classes = n_classes
        # One learnable mixing weight per expert plus one for the shared model
        self.alpha_params = nn.Parameter(torch.ones(self.n_domains + 1))

    def forward(
            self,
            input_ids: torch.LongTensor,
            attention_mask: torch.LongTensor,
            domains: torch.LongTensor = None,
            labels: torch.LongTensor = None,
            ret_alpha: bool = False
    ):
        """
        Returns ``(preds,)`` — the weighted average of the per-view class
        distributions — prepending the loss when ``labels`` is given and
        appending the mixing weights when ``ret_alpha`` is True.
        """
        # Get all the pooled outputs
        # (n_domain) b x dim
        pooled_outputs = self.multi_xformer(input_ids, attention_mask)
        # b x dim
        outputs = self.shared_bert(input_ids, attention_mask=attention_mask)
        logits_shared = outputs[0]

        softmax = nn.Softmax(dim=-1)
        logits_private = [self.multi_xformer_classifiers[d](pooled_outputs[d]) for d in range(self.n_domains)]

        if domains is not None and self.training:
            # Exclude the in-domain expert while training; renormalize the
            # remaining alpha weights over the views actually ensembled.
            logits = [l for j, l in enumerate(logits_private) if j != domains[0]] + [logits_shared]
            indices = [j for j, l in enumerate(logits_private) if j != domains[0]] + [self.n_domains]
            # Build the index tensor on the parameter's device instead of the
            # CUDA-only torch.cuda.LongTensor (which crashed on CPU runs).
            index = torch.tensor(indices, dtype=torch.long, device=self.alpha_params.device)
            alpha_weights = torch.gather(self.alpha_params, 0, index)
            attn = softmax(alpha_weights).unsqueeze(0).unsqueeze(2)
        else:
            logits = logits_private + [logits_shared]
            alpha_weights = self.alpha_params
            attn = softmax(alpha_weights).unsqueeze(0).unsqueeze(2)

        # b x n_dom(+1) x nclasses
        preds = torch.stack([softmax(logs) for logs in logits], dim=1)
        # Apply attention
        preds = torch.sum(preds * attn, dim=1)
        outputs = (preds,)
        if labels is not None:
            # LogSoftmax + NLLLoss
            loss_fn = nn.NLLLoss()
            loss = (0.5) * loss_fn(torch.log(preds), labels)
            # Strong supervision on in domain
            if domains is not None:
                domain = domains[0]
                domain_logits = logits_private[domain]
                xent = nn.CrossEntropyLoss()
                loss += (0.5) * xent(domain_logits, labels)

            outputs = (loss,) + outputs
        if ret_alpha:
            outputs += (attn,)
        return outputs


class MultiViewTransformerNetworkAveragingIndividuals(nn.Module):
    """
    Multi-view network built from individually fine-tuned DistilBERT models:
    one expert per domain plus one shared model.  In non-averaging mode the
    in-domain expert (or the shared model) predicts alone; in averaging mode
    all models' softmax distributions are combined with ``self.weights``.
    """

    def __init__(self, bert_model, bert_config, n_domains: int = 2, n_classes: int = 2):
        super(MultiViewTransformerNetworkAveragingIndividuals, self).__init__()

        # One INDEPENDENT expert per domain.  The original
        # [module] * n_domains replicated a single module object, so all
        # "experts" silently shared the same weights.
        self.domain_experts = nn.ModuleList([
            DistilBertForSequenceClassification.from_pretrained(bert_model, config=bert_config)
            for _ in range(n_domains)
        ])
        self.shared_bert = DistilBertForSequenceClassification.from_pretrained(bert_model, config=bert_config)

        self.n_domains = n_domains
        self.n_classes = n_classes

        # Default weight is averaging
        self.weights = [1. / (self.n_domains + 1)] * (self.n_domains + 1)

        # Toggle: False -> single-model prediction, True -> weighted ensemble
        self.average = False

    def forward(
            self,
            input_ids: torch.LongTensor,
            attention_mask: torch.LongTensor,
            domains: torch.LongTensor = None,
            labels: torch.LongTensor = None,
            return_logits: bool = False
    ):
        """
        Returns ``(preds,)``, prepending the loss when ``labels`` is given.
        With ``return_logits`` (averaging mode only) the raw per-model logits
        list is returned instead.
        """

        logits_shared = self.shared_bert(input_ids, attention_mask=attention_mask)[0]

        # Explicit dim: bare nn.Softmax() is deprecated; logits are 2-D.
        softmax = nn.Softmax(dim=-1)

        if not self.average:
            if domains is not None:
                # Route the (single-domain) batch through its expert
                logits = self.domain_experts[domains[0]](input_ids, attention_mask=attention_mask)[0]
                preds = softmax(logits)
            else:
                # Unknown domain: fall back to the shared model
                logits = logits_shared
                preds = softmax(logits)
        else:
            logits_private = [self.domain_experts[d](input_ids, attention_mask=attention_mask)[0] for d in
                              range(self.n_domains)]
            logits = logits_private + [logits_shared]
            if return_logits:
                return logits
            # Device-agnostic (original torch.cuda.FloatTensor was CUDA-only)
            attn = torch.tensor(self.weights, device=logits_shared.device).view(1, -1, 1)
            # b x n_dom(+1) x nclasses
            preds = torch.stack([softmax(logs) for logs in logits], dim=1)
            # Apply attention
            preds = torch.sum(preds * attn, dim=1)

        outputs = (preds,)
        if labels is not None:
            # LogSoftmax + NLLLoss on the ensemble, plus direct cross entropy
            # on the shared model's logits
            loss_fn = nn.NLLLoss()
            xent = nn.CrossEntropyLoss()
            loss = loss_fn(torch.log(preds), labels) + xent(logits_shared, labels)

            outputs = (loss,) + outputs
        return outputs


class MultiViewTransformerNetworkDomainClassifierIndividuals(nn.Module):
    """
    Multi-view network of individually fine-tuned DistilBERT experts whose
    predictions are mixed using the output distribution of a separate domain
    classifier (instead of fixed averaging weights).
    """

    def __init__(self, bert_model, bert_config, domain_classifier, n_domains: int = 2, n_classes: int = 2):
        super(MultiViewTransformerNetworkDomainClassifierIndividuals, self).__init__()

        # One INDEPENDENT expert per domain.  The original
        # [module] * n_domains replicated a single module object, so all
        # "experts" silently shared the same weights.
        self.domain_experts = nn.ModuleList([
            DistilBertForSequenceClassification.from_pretrained(bert_model, config=bert_config)
            for _ in range(n_domains)
        ])
        # Produces per-domain logits used as mixing weights in averaging mode
        self.domain_classifier = domain_classifier

        self.n_domains = n_domains
        self.n_classes = n_classes

        # Toggle: False -> in-domain expert only, True -> classifier-weighted mix
        self.average = False

    def forward(
            self,
            input_ids: torch.LongTensor,
            attention_mask: torch.LongTensor,
            domains: torch.LongTensor = None,
            labels: torch.LongTensor = None
    ):
        """
        Returns ``(preds,)``, prepending the NLL loss when ``labels`` is given.

        NOTE(review): the non-averaging branch indexes ``domains[0]`` without a
        None check, so ``domains`` is required when ``self.average`` is False —
        confirm against callers.
        """

        softmax = nn.Softmax(dim=-1)
        if not self.average:
            logits = self.domain_experts[domains[0]](input_ids, attention_mask=attention_mask)[0]
            preds = softmax(logits)
        else:
            logits = [self.domain_experts[d](input_ids, attention_mask=attention_mask)[0] for d in
                              range(self.n_domains)]
            logits_shared = self.domain_classifier(input_ids, attention_mask=attention_mask)[0]
            # Mixing weights from the domain classifier: b x n_dom x 1
            attn = nn.Softmax(dim=-1)(logits_shared).unsqueeze(-1)
            # b x n_dom x nclasses
            preds = torch.stack([softmax(logs) for logs in logits], dim=1)
            # Apply attention
            preds = torch.sum(preds * attn, dim=1)

        outputs = (preds,)
        if labels is not None:
            # LogSoftmax + NLLLoss (cross entropy on the mixed distribution)
            loss_fn = nn.NLLLoss()
            loss = loss_fn(torch.log(preds), labels)

            outputs = (loss,) + outputs
        return outputs


class MultiViewTransformerNetworkSelectiveWeight(nn.Module):
    """
    Multi-view transformer network for domain adaptation.

    During training, ensembles the out-of-domain experts and the shared model
    with uniform weights; at evaluation time it uses the externally settable
    ``self.weights`` vector over all experts plus the shared model.
    """

    def __init__(self, multi_xformer: MultiTransformerClassifier, shared_bert: VanillaBert, n_classes: int = 2):
        super(MultiViewTransformerNetworkSelectiveWeight, self).__init__()

        # Per-domain expert encoders and their classification heads
        self.multi_xformer = multi_xformer.multi_xformer
        self.multi_xformer_classifiers = multi_xformer.classifier
        # Shared sequence-classification model
        self.shared_bert = shared_bert.bert

        self.d_model = multi_xformer.d_model
        # Frozen parameter so it follows the module across devices
        self.dim_param = nn.Parameter(torch.FloatTensor([multi_xformer.d_model]), requires_grad=False)
        self.n_domains = multi_xformer.n_domains
        self.n_classes = n_classes
        # Default weight is averaging
        self.weights = [1./(self.n_domains + 1)] * (self.n_domains + 1)

    def forward(
            self,
            input_ids: torch.LongTensor,
            attention_mask: torch.LongTensor,
            domains: torch.LongTensor = None,
            labels: torch.LongTensor = None,
            ret_alpha: bool = False
    ):
        """
        Returns ``(preds,)``, prepending the loss when ``labels`` is given and
        appending the effective ensembling weights when ``ret_alpha`` is True.
        """
        # Get all the pooled outputs
        # (n_domain) b x dim
        pooled_outputs = self.multi_xformer(input_ids, attention_mask)
        # b x dim
        outputs = self.shared_bert(input_ids, attention_mask=attention_mask)
        logits_shared = outputs[0]

        # Explicit dim: bare nn.Softmax() is deprecated; logits are 2-D.
        softmax = nn.Softmax(dim=-1)
        logits_private = [self.multi_xformer_classifiers[d](pooled_outputs[d]) for d in range(self.n_domains)]

        if domains is not None and self.training:
            # Exclude the in-domain expert while training; uniform weights
            logits = [l for j, l in enumerate(logits_private) if j != domains[0]] + [logits_shared]
            attn = 1 / len(logits)
        else:
            logits = logits_private + [logits_shared]
            # Device-agnostic (original torch.cuda.FloatTensor was CUDA-only)
            attn = torch.tensor(self.weights, device=logits_shared.device).view(1, -1, 1)

        # b x n_dom(+1) x nclasses
        preds = torch.stack([softmax(logs) for logs in logits], dim=1)
        # Apply attention
        preds = torch.sum(preds * attn, dim=1)
        outputs = (preds,)
        if labels is not None:
            # LogSoftmax + NLLLoss
            loss_fn = nn.NLLLoss()
            loss = (0.5) * loss_fn(torch.log(preds), labels)
            # Strong supervision on in domain
            if domains is not None:
                domain = domains[0]
                domain_logits = logits_private[domain]
                xent = nn.CrossEntropyLoss()
                loss += (0.5) * xent(domain_logits, labels)

            outputs = (loss,) + outputs
        if ret_alpha:
            # The original torch.cuda.FloatTensor([attn]) crashed whenever attn
            # was already a tensor (the eval branch above); report the weights
            # device-agnostically instead.
            if torch.is_tensor(attn):
                outputs += (attn,)
            else:
                outputs += (torch.full_like(preds, attn),)
        return outputs


class MultiViewTransformerNetworkProbabilities(nn.Module):
    """
    Multi-view transformer network for domain adaptation.

    Instead of attending over representations, this variant attends over the
    per-view CLASS DISTRIBUTIONS: scaled dot-product attention between the
    shared BERT representation (query) and each view's pooled representation
    (keys) produces weights used to mix the views' softmax predictions.
    """

    def __init__(self, multi_xformer: MultiTransformerClassifier, shared_bert: VanillaBert, n_classes: int = 2):
        super(MultiViewTransformerNetworkProbabilities, self).__init__()

        # Per-domain expert encoders and their classification heads
        self.multi_xformer = multi_xformer.multi_xformer
        self.multi_xformer_classifiers = multi_xformer.classifier
        # Shared sequence-classification model (returns logits + hidden states)
        self.shared_bert = shared_bert.bert

        self.d_model = multi_xformer.d_model
        # Frozen parameter so it follows the module across devices
        self.dim_param = nn.Parameter(torch.FloatTensor([multi_xformer.d_model]), requires_grad=False)
        self.n_domains = multi_xformer.n_domains
        self.n_classes = n_classes

        # Query matrix
        self.Q = nn.Parameter(torch.randn((multi_xformer.d_model, multi_xformer.d_model)), requires_grad=True)
        # Key matrix
        self.K = nn.Parameter(torch.randn((multi_xformer.d_model, multi_xformer.d_model)), requires_grad=True)
        # Value matrix — NOTE: not used in forward() (predictions are mixed
        # directly); kept so checkpoints remain loadable.
        self.V = nn.Parameter(torch.randn((multi_xformer.d_model, multi_xformer.d_model)), requires_grad=True)

        # Main classifier
        #self.task_classifier = nn.Linear(multi_xformer.d_model, n_classes)
        # TODO: Introduce aux tasks if needed

        nn.init.xavier_uniform_(self.Q)
        nn.init.xavier_uniform_(self.K)
        nn.init.xavier_uniform_(self.V)

    def forward(
            self,
            input_ids: torch.LongTensor,
            attention_mask: torch.LongTensor,
            domains: torch.LongTensor = None,
            labels: torch.LongTensor = None,
            ret_alpha: bool = False
    ):
        """
        Returns ``(preds,)``, prepending the loss when ``labels`` is given and
        appending the attention weights when ``ret_alpha`` is True.
        """
        # Get all the pooled outputs
        # (n_domain) b x dim
        pooled_outputs = self.multi_xformer(input_ids, attention_mask)
        # b x dim
        outputs = self.shared_bert(input_ids, attention_mask=attention_mask)
        # [CLS] vector of the last hidden layer — assumes outputs[1] is the
        # tuple of hidden states from the project's modified BERT
        shared_output = outputs[1][-1][:,0,:]
        logits_shared = outputs[0]

        # Values b x n_domain (+ 1) x dim
        if domains is not None and self.training:
            # Exclude the in-domain expert while training
            attend_values = [p for j, p in enumerate(pooled_outputs) if j != domains[0]]
            v = torch.stack(attend_values + [shared_output], dim=1)
        else:
            v = torch.stack(pooled_outputs + [shared_output], dim=1)

        # Queries b x dim
        q = shared_output @ self.Q
        # Keys b*(n_domain + 1) x dim
        k = v.view(-1, self.d_model) @ self.K
        # Attention (scaled dot product) b x (n_domain + 1)
        attn = torch.sum(k.view(-1, v.shape[1], self.d_model) * q.unsqueeze(1), dim=-1) / torch.sqrt(
            self.dim_param)
        attn = nn.Softmax(dim=-1)(attn)

        # Explicit dim: bare nn.Softmax() is deprecated; logits are 2-D.
        softmax = nn.Softmax(dim=-1)
        logits_private = [self.multi_xformer_classifiers[d](pooled_outputs[d]) for d in range(self.n_domains)]

        if domains is not None and self.training:
            logits = [l for j, l in enumerate(logits_private) if j != domains[0]] + [logits_shared]
        else:
            logits = logits_private + [logits_shared]

        # b x n_dom(+1) x nclasses
        preds = torch.stack([softmax(logs) for logs in logits], dim=1)
        # Apply attention
        preds = torch.sum(preds * attn.unsqueeze(-1), dim=1)
        outputs = (preds,)
        if labels is not None:
            # LogSoftmax + NLLLoss
            loss_fn = nn.NLLLoss()
            loss = (0.5) * loss_fn(torch.log(preds), labels)
            # Strong supervision on in domain
            if domains is not None:
                domain = domains[0]
                domain_logits = logits_private[domain]
                xent = nn.CrossEntropyLoss()
                loss += (0.5) * xent(domain_logits, labels)

            outputs = (loss,) + outputs
        if ret_alpha:
            outputs += (attn,)
        return outputs


class MultiViewTransformerNetworkProbabilitiesAdversarial(nn.Module):
    """
    Probability-mixing multi-view network with an additional domain-adversarial
    objective: a gradient-reversed domain classifier is attached to the [CLS]
    vector of an intermediate layer of the shared BERT encoder.
    """

    def __init__(self, multi_xformer: MultiTransformerClassifier, shared_bert: VanillaBert, n_classes: int = 2, supervision_layer: int = 12):
        super(MultiViewTransformerNetworkProbabilitiesAdversarial, self).__init__()

        # Per-domain expert encoders and their classification heads
        self.multi_xformer = multi_xformer.multi_xformer
        self.multi_xformer_classifiers = multi_xformer.classifier
        # Shared sequence-classification model (returns logits + hidden states)
        self.shared_bert = shared_bert.bert
        self.d_model = multi_xformer.d_model

        # Add one extra for the target data
        self.domain_classifier = nn.Linear(self.d_model, multi_xformer.n_domains + 1)
        # Index of the hidden layer fed to the adversarial domain classifier
        self.supervision_layer = supervision_layer

        self.dim_param = nn.Parameter(torch.FloatTensor([multi_xformer.d_model]), requires_grad=False)
        self.n_domains = multi_xformer.n_domains
        self.n_classes = n_classes

        # Query matrix
        self.Q = nn.Parameter(torch.randn((multi_xformer.d_model, multi_xformer.d_model)), requires_grad=True)
        # Key matrix
        self.K = nn.Parameter(torch.randn((multi_xformer.d_model, multi_xformer.d_model)), requires_grad=True)
        # Value matrix — NOTE: not used in forward(); kept for checkpoint
        # compatibility.
        self.V = nn.Parameter(torch.randn((multi_xformer.d_model, multi_xformer.d_model)), requires_grad=True)

        # Main classifier
        #self.task_classifier = nn.Linear(multi_xformer.d_model, n_classes)
        # TODO: Introduce aux tasks if needed

        nn.init.xavier_uniform_(self.Q)
        nn.init.xavier_uniform_(self.K)
        nn.init.xavier_uniform_(self.V)

    def forward(
            self,
            input_ids: torch.LongTensor,
            attention_mask: torch.LongTensor,
            domains: torch.LongTensor = None,
            labels: torch.LongTensor = None,
            ret_alpha: bool = False
    ):
        """
        Returns ``(preds,)``, prepending the (task + adversarial) loss when
        ``labels`` and/or ``domains`` are given, and appending the attention
        weights when ``ret_alpha`` is True.
        """
        # Get all the pooled outputs
        # (n_domain) b x dim
        pooled_outputs = self.multi_xformer(input_ids, attention_mask)
        # b x dim
        outputs = self.shared_bert(input_ids, attention_mask=attention_mask)
        # NOTE(review): min(1, ...) caps the divisor at 1 and yields <= 0 when
        # supervision_layer >= number of hidden states (division by zero);
        # max(1, ...) may have been intended — confirm before changing.
        divisor = min(1, 2 * (len(outputs[1]) - self.supervision_layer))
        shared_output = outputs[1][-1][:,0,:]
        logits_shared = outputs[0]

        # Domain adversarial bit
        domain_supervision_layer = outputs[1][self.supervision_layer][:,0,:]
        adv_input = GradientReversal.apply(domain_supervision_layer)
        adv_logits = self.domain_classifier(adv_input)

        # Values b x n_domain (+ 1) x dim
        if domains is not None and self.training:
            # Exclude the in-domain expert while training
            attend_values = [p for j, p in enumerate(pooled_outputs) if j != domains[0]]
            v = torch.stack(attend_values + [shared_output], dim=1)
        else:
            v = torch.stack(pooled_outputs + [shared_output], dim=1)
        # Queries b x dim
        q = shared_output @ self.Q
        # Keys b*(n_domain + 1) x dim
        k = v.view(-1, self.d_model) @ self.K
        # Attention (scaled dot product) b x (n_domain + 1)
        attn = torch.sum(k.view(-1, v.shape[1], self.d_model) * q.unsqueeze(1), dim=-1) / torch.sqrt(
            self.dim_param)
        attn = nn.Softmax(dim=-1)(attn)

        # Explicit dim: bare nn.Softmax() is deprecated; logits are 2-D.
        softmax = nn.Softmax(dim=-1)
        logits_private = [self.multi_xformer_classifiers[d](pooled_outputs[d]) for d in range(self.n_domains)]
        if domains is not None and self.training:
            logits = [l for j, l in enumerate(logits_private) if j != domains[0]] + [logits_shared]
        else:
            logits = logits_private + [logits_shared]

        # b x n_dom+1 x nclasses
        preds = torch.stack([softmax(logs) for logs in logits], dim=1)
        # Apply attention
        preds = torch.sum(preds * attn.unsqueeze(-1), dim=1)
        outputs = (preds,)
        if labels is not None:
            # LogSoftmax + NLLLoss
            loss_fn = nn.NLLLoss()
            loss = 0.5*loss_fn(torch.log(preds), labels)
            if domains is not None:
                domain = domains[0]
                xent = nn.CrossEntropyLoss()
                domain_logits = logits_private[domain]
                loss += 0.5*xent(domain_logits, labels)
                # Scale the adversarial loss depending on how deep in the network it is
                loss += (1e-3 / divisor) * xent(adv_logits, domains)

            outputs = (loss,) + outputs
        # For unsupervised adversarial loss
        elif domains is not None:
            xent = nn.CrossEntropyLoss()
            # Scale the adversarial loss depending on how deep in the network it is
            loss = (1e-3 / divisor) * xent(adv_logits, domains)
            outputs = (loss,) + outputs

        if ret_alpha:
            outputs += (attn,)
        return outputs


class MultiViewTransformerNetworkDomainClassifierAttention(nn.Module):
    """
    Multi-view network that mixes per-domain expert predictions using the
    output distribution of a domain classifier (``shared_bert``) as the
    mixing weights.
    """

    def __init__(self, multi_xformer: MultiTransformerClassifier, shared_bert: VanillaBert, n_classes: int = 2):
        super(MultiViewTransformerNetworkDomainClassifierAttention, self).__init__()

        # Per-domain expert encoders and their classification heads
        self.multi_xformer = multi_xformer.multi_xformer
        self.multi_xformer_classifiers = multi_xformer.classifier
        # Domain classifier whose softmax output weights the experts
        self.shared_bert = shared_bert

        self.d_model = multi_xformer.d_model
        self.n_domains = multi_xformer.n_domains
        self.n_classes = n_classes
        # Frozen parameter so it follows the module across devices
        self.dim_param = nn.Parameter(torch.FloatTensor([multi_xformer.d_model]), requires_grad=False)

    def forward(
            self,
            input_ids: torch.LongTensor,
            attention_mask: torch.LongTensor,
            domains: torch.LongTensor = None,
            labels: torch.LongTensor = None,
            ret_alpha: bool = False
    ):
        """
        Returns ``(preds,)`` — the domain-weighted class distribution —
        prepending the NLL loss when ``labels`` is given and appending the
        mixing weights when ``ret_alpha`` is True.  ``domains`` is unused.
        """
        # Per-domain pooled representations: list of (batch, dim)
        pooled = self.multi_xformer(input_ids, attention_mask)

        # Domain-classifier logits -> mixing weights over the experts, b x n_domains
        domain_logits = self.shared_bert(input_ids, attention_mask=attention_mask)[0]
        attn = domain_logits.softmax(dim=-1)

        # Each expert's class distribution, stacked to b x n_dom x nclasses
        expert_probs = [
            self.multi_xformer_classifiers[d](pooled[d]).softmax(dim=-1)
            for d in range(self.n_domains)
        ]
        preds = torch.stack(expert_probs, dim=1)
        # Weight each expert's distribution by the domain probability and sum
        preds = (preds * attn.unsqueeze(-1)).sum(dim=1)

        outputs = (preds,)
        if labels is not None:
            # NLL on log-probabilities == cross entropy on the mixed distribution
            loss = nn.NLLLoss()(preds.log(), labels)
            outputs = (loss,) + outputs
        if ret_alpha:
            outputs += (attn,)
        return outputs


class MultiViewTransformerNetworkDomainAdversarial(nn.Module):
    """
    Multi-view transformer network for domain adaptation.

    QKV attention over (expert views + shared BERT [CLS] output) feeds a task
    classifier, while a gradient-reversed domain classifier on an intermediate
    BERT layer provides a domain-adversarial loss.
    """
    def __init__(self, multi_xformer: MultiTransformerClassifier, shared_bert: VanillaBert, n_classes: int = 2, n_domains: int = 3, supervision_layer: int = 12):
        super(MultiViewTransformerNetworkDomainAdversarial, self).__init__()

        # Per-domain expert encoders
        self.multi_xformer = multi_xformer.multi_xformer
        # Underlying BertModel of the shared classifier wrapper
        self.shared_bert = shared_bert.bert.bert

        self.d_model = multi_xformer.d_model
        # Frozen parameter so it follows the module across devices
        self.dim_param = nn.Parameter(torch.FloatTensor([multi_xformer.d_model]), requires_grad=False)
        # Number of expert views (distinct from n_domains used adversarially)
        self.n_xformers = multi_xformer.n_domains
        self.n_classes = n_classes
        self.n_domains = n_domains
        # Index of the hidden layer fed to the adversarial domain classifier
        self.supervision_layer = supervision_layer

        # Query matrix
        self.Q = nn.Parameter(torch.randn((multi_xformer.d_model, multi_xformer.d_model)), requires_grad=True)
        # Key matrix
        self.K = nn.Parameter(torch.randn((multi_xformer.d_model, multi_xformer.d_model)), requires_grad=True)
        # Value matrix
        self.V = nn.Parameter(torch.randn((multi_xformer.d_model, multi_xformer.d_model)), requires_grad=True)

        # Main classifier
        self.domain_classifier = nn.Linear(self.d_model, n_domains)
        self.task_classifier = nn.Linear(multi_xformer.d_model, n_classes)
        # TODO: Introduce aux tasks if needed

        nn.init.xavier_uniform_(self.Q)
        nn.init.xavier_uniform_(self.K)
        nn.init.xavier_uniform_(self.V)

    def forward(
            self,
            input_ids: torch.LongTensor,
            attention_mask: torch.LongTensor,
            domains: torch.LongTensor = None,
            labels: torch.LongTensor = None,
            ret_alpha: bool = False
    ):
        """
        Returns ``(logits,)``, prepending the (task + adversarial) loss when
        ``labels``/``domains`` are given, and appending the attention weights
        when ``ret_alpha`` is True.
        """
        # Get all the pooled outputs
        # (n_domain) b x dim
        pooled_outputs = self.multi_xformer(input_ids, attention_mask)
        # b x dim
        outputs = self.shared_bert(input_ids, attention_mask=attention_mask)
        # NOTE(review): slicing [:,0,:] assumes outputs[1] is 3-D; a stock
        # BertModel returns the 2-D pooled output at index 1 — confirm against
        # the project's modeling_bert.
        shared_output = outputs[1][:,0,:]
        # NOTE(review): min(1, ...) caps the divisor at 1 and yields <= 0 when
        # supervision_layer >= len(outputs[1]) (division by zero below);
        # max(1, ...) may have been intended — confirm.
        divisor = min(1, 2 * (len(outputs[1]) - self.supervision_layer))

        # Values b x n_domain + 1 x dim
        v = torch.stack(pooled_outputs + [shared_output], dim=1)
        # Queries b x dim
        q = shared_output @ self.Q
        # Keys b*(n_domain + 1) x dim
        k = v.view(-1, self.d_model) @ self.K
        # Attention (scaled dot product) b x (n_domain + 1)
        attn = torch.sum(k.view(-1, self.n_xformers + 1, self.d_model) * q.unsqueeze(1), dim=-1) / torch.sqrt(self.dim_param)
        attn = nn.Softmax(dim=-1)(attn)
        v = v.view(-1, self.d_model) @ self.V
        v = v.view(-1, self.n_xformers + 1, self.d_model)
        # Attend to the values b x dim
        o = torch.sum(attn.unsqueeze(-1) * v, dim=1)

        # Classifier
        logits = self.task_classifier(o)

        # Domain adversarial bit
        # [CLS] vector of the supervised hidden layer; presumably outputs[2]
        # holds the tuple of hidden states — verify against modeling_bert
        domain_supervision_layer = outputs[2][self.supervision_layer][:,0,:].squeeze()
        adv_input = GradientReversal.apply(domain_supervision_layer)
        adv_logits = self.domain_classifier(adv_input)

        outputs = (logits,)

        loss_fn = nn.CrossEntropyLoss()
        if domains is not None:
            # Scale the adversarial loss depending on how deep in the network it is
            loss = (1e-3 / divisor) * loss_fn(adv_logits, domains)
            if labels is not None:
                loss += loss_fn(logits, labels)
            outputs = (loss,) + outputs
        elif labels is not None:
            loss = loss_fn(logits, labels)
            outputs = (loss,) + outputs
        if ret_alpha:
            outputs += (attn,)

        return outputs


class MultiViewCNNAveragingIndividuals(nn.Module):
    """
    CNN counterpart of the averaging multi-view network: one CNN expert per
    domain plus a shared CNN, ensembled by weighted averaging of their softmax
    distributions when ``self.average`` is True.
    """

    def __init__(self, args: Namespace, embeddings: np.array, n_domains: int = 2, n_classes: int = 2):
        super(MultiViewCNNAveragingIndividuals, self).__init__()

        # One INDEPENDENT expert per domain.  The original
        # [NLICNN(...)] * n_domains replicated a single module object, so all
        # "experts" silently shared the same weights.
        self.domain_experts = nn.ModuleList([
            NLICNN(embeddings, args, n_classes) for _ in range(n_domains)
        ])
        self.shared_model = NLICNN(embeddings, args, n_classes)

        self.n_domains = n_domains
        self.n_classes = n_classes

        # Default weight is averaging
        self.weights = [1. / (self.n_domains + 1)] * (self.n_domains + 1)

        # Toggle: False -> single-model prediction, True -> weighted ensemble
        self.average = False

    def forward(
            self,
            input_ids: torch.LongTensor,
            attention_mask: torch.LongTensor,
            domains: torch.LongTensor = None,
            labels: torch.LongTensor = None,
            return_logits: bool = False
    ):
        """
        Returns ``(preds,)``, prepending the loss when ``labels`` is given.
        With ``return_logits`` (averaging mode only) the raw per-model logits
        list is returned instead.
        """

        logits_shared = self.shared_model(input_ids, attention_mask=attention_mask)

        # Explicit dim: bare nn.Softmax() is deprecated; logits are 2-D.
        softmax = nn.Softmax(dim=-1)

        if not self.average:
            if domains is not None:
                # Route the (single-domain) batch through its expert
                logits = self.domain_experts[domains[0]](input_ids, attention_mask=attention_mask)
                preds = softmax(logits)
            else:
                # Unknown domain: fall back to the shared model
                logits = logits_shared
                preds = softmax(logits)
        else:
            logits_private = [self.domain_experts[d](input_ids, attention_mask=attention_mask) for d in
                              range(self.n_domains)]
            logits = logits_private + [logits_shared]
            if return_logits:
                return logits
            # Device-agnostic (original torch.cuda.FloatTensor was CUDA-only)
            attn = torch.tensor(self.weights, device=logits_shared.device).view(1, -1, 1)
            # b x n_dom(+1) x nclasses
            preds = torch.stack([softmax(logs) for logs in logits], dim=1)
            # Apply attention
            preds = torch.sum(preds * attn, dim=1)

        outputs = (preds,)
        if labels is not None:
            # LogSoftmax + NLLLoss on the ensemble, plus direct cross entropy
            # on the shared model's logits
            loss_fn = nn.NLLLoss()
            xent = nn.CrossEntropyLoss()
            loss = loss_fn(torch.log(preds), labels) + xent(logits_shared, labels)

            outputs = (loss,) + outputs
        return outputs


# Filename template for GloVe vectors; this binds str.format, so call it with
# the embedding dim: _glove_path(100) -> "glove.6B.100d.txt".
_glove_path = "glove.6B.{}d.txt".format


def _get_glove_embeddings(embedding_dim: int, glove_dir: str):
    """
    Load GloVe vectors of the given dimension from ``glove_dir``.

    :param embedding_dim: GloVe dimension (selects ``glove.6B.{dim}d.txt``)
    :param glove_dir: directory containing the GloVe text files
    :return: ``(word_to_index, word_vectors)`` — dict mapping word to row
        index, and a parallel list of np.ndarray vectors
    """
    word_to_index = {}
    word_vectors = []

    filename = _glove_path(embedding_dim)
    # Stream line-by-line instead of readlines(): the 6B files are huge.
    # GloVe files are distributed as UTF-8; don't depend on the locale.
    with open(os.path.join(glove_dir, filename), encoding="utf-8") as fp:
        # Previously the desc interpolated the bound str.format method
        # (its repr), not the actual filename.
        for line in tqdm(fp, desc=f'Loading Glove embeddings {filename}'):
            line = line.split(" ")

            word = line[0]
            word_to_index[word] = len(word_to_index)

            vec = np.array([float(x) for x in line[1:]])
            word_vectors.append(vec)

    return word_to_index, word_vectors


def get_embeddings(embedding_dim: int, embedding_dir: str, tokenizer: PreTrainedTokenizer):
    """
    Build an embedding matrix aligned with the tokenizer's vocabulary ids.

    Tokens present in GloVe get their pretrained vector; all other tokens
    get a random vector (trainable, so they are learned during fine-tuning).

    :param embedding_dim: GloVe dimension to load
    :param embedding_dir: directory with the GloVe text files
    :param tokenizer: tokenizer providing ``vocab`` and ``ids_to_tokens``
    :return: a trainable nn.Parameter of shape (len(tokenizer), embedding_dim)
    """
    word_to_index, word_vectors = _get_glove_embeddings(embedding_dim, embedding_dir)

    embedding_matrix = np.zeros((len(tokenizer), embedding_dim))

    # `token_id` instead of `id`: the original shadowed the builtin.
    for token_id in range(max(tokenizer.vocab.values()) + 1):
        word = tokenizer.ids_to_tokens[token_id]
        if word in word_to_index:
            embedding_matrix[token_id] = word_vectors[word_to_index[word]]
        else:
            # Out-of-vocabulary token: random init.
            embedding_matrix[token_id] = np.random.rand(embedding_dim)

    return torch.nn.Parameter(torch.tensor(embedding_matrix, dtype=torch.float), requires_grad=True)


class NLICNN(torch.nn.Module):
    """
    Kim-style CNN sentence classifier over pretrained word embeddings.

    Several parallel Conv2d filters of different heights slide over the
    embedded sequence; each is ReLU'd and max-pooled over time, the pooled
    features are concatenated and fed to a linear output layer.
    """

    def __init__(self, embeddings: np.array, args: Namespace, n_labels: int):
        super(NLICNN, self).__init__()
        self.args = args

        vocab_size, emb_dim = embeddings.shape[0], embeddings.shape[1]
        self.embedding = torch.nn.Embedding(vocab_size, emb_dim)
        # Start from the supplied (e.g. GloVe) matrix; keep it trainable.
        self.embedding.weight = torch.nn.Parameter(
            torch.tensor(embeddings, dtype=torch.float), requires_grad=True)

        self.dropout = torch.nn.Dropout(args.dropout)

        # One Conv2d per kernel height; each filter spans the full
        # embedding dimension, so convolution only moves over time.
        self.conv_layers = torch.nn.ModuleList([
            torch.nn.Conv2d(args.in_channels, args.out_channels,
                            (height, emb_dim), args.stride, args.padding)
            for height in args.kernel_heights
        ])

        output_units = n_labels  # if n_labels > 2 else 1
        self.final = torch.nn.Linear(len(args.kernel_heights) * args.out_channels, output_units)

    def conv_block(self, input, conv_layer):
        """Convolve, ReLU, and max-pool over time -> (batch, out_channels)."""
        # (batch, out_channels, time, 1) -> (batch, out_channels, time)
        activation = F.relu(conv_layer(input).squeeze(3))
        # Global max over the time axis -> (batch, out_channels)
        return F.max_pool1d(activation, activation.size(2)).squeeze(2)

    def forward(self, input, attention_mask):
        # Zero out embeddings at padded positions.
        embedded = self.embedding(input) * attention_mask.unsqueeze(-1)
        # Add the channel dim: (batch, 1, seq, emb_dim), then dropout.
        embedded = self.dropout(embedded.unsqueeze(1))

        # Pool each kernel size and concatenate:
        # (batch, num_kernels * out_channels)
        pooled = [self.conv_block(embedded, layer) for layer in self.conv_layers]
        features = self.dropout(torch.cat(pooled, 1))
        return self.final(features)