"""
Definition of the SSEA model
"""
import torch
import torch.nn as nn
from torch.nn.modules.activation import Sigmoid
import treelstm

from .layers import RNNDropout, Seq2SeqEncoder, SoftmaxAttention,FullFusion,SoftmaxAttention
from .utils import get_mask, masked_softmax_coda_attention, replace_masked,weighted_sum
from treelstm import TreeLSTM
class SSEA(nn.Module):
    """
    Implementation of the SSEA model: an ESIM-style architecture ("Enhanced
    LSTM for Natural Language Inference", Chen et al.) extended with a
    tree-encoding branch and an attention score that combines a dot-product
    similarity with a masked Euclidean-distance term.
    """

    def __init__(self,
                 vocab_size,
                 embedding_dim,
                 hidden_size,
                 embeddings=None,
                 padding_idx=0,
                 dropout=0.5,
                 num_classes=3,
                 batch_size=32,
                 device="gpu"):
        """
        Args:
            vocab_size: The size of the vocabulary of embeddings in the model.
            embedding_dim: The dimension of the word embeddings.
            hidden_size: The size of all the hidden layers in the network.
            embeddings: A tensor of size (vocab_size, embedding_dim) containing
                pretrained word embeddings. If None, word embeddings are
                initialised randomly. Defaults to None.
            padding_idx: The index of the padding token in the premises and
                hypotheses passed as input to the model. Defaults to 0.
            dropout: The dropout rate to use between the layers of the network.
                A dropout rate of 0 corresponds to using no dropout at all.
                Defaults to 0.5.
            num_classes: The number of classes in the output of the network.
                Defaults to 3.
            batch_size: Unused; kept for backward compatibility with existing
                callers. Defaults to 32.
            device: The name of the device on which the model is being
                executed. Defaults to "gpu".
        """
        super(SSEA, self).__init__()
        self.vocab_size = vocab_size
        self.embedding_dim = embedding_dim
        self.hidden_size = hidden_size
        self.num_classes = num_classes
        self.dropout = dropout
        self.device = device
        self._word_embedding = nn.Embedding(self.vocab_size,
                                            self.embedding_dim,
                                            padding_idx=padding_idx,
                                            _weight=embeddings)
        # Always defined so forward() cannot hit an AttributeError when
        # dropout == 0: nn.Dropout(p=0) is the identity, so behaviour is
        # unchanged for dropout > 0.
        self._rnn_dropout = nn.Dropout(p=self.dropout)
        self._encoding = Seq2SeqEncoder(nn.LSTM,
                                        self.embedding_dim,
                                        self.hidden_size,
                                        bidirectional=True)
        # NOTE(review): instantiated but not referenced in forward(); the
        # tree encoding flows through the module-level treeencode() hook.
        self._encoding_tree = TreeLSTM(self.embedding_dim,
                                       self.hidden_size * 2)
        # Projects the 8*hidden concatenation of [enc, att, enc-att, enc*att]
        # (each 2*hidden from the BiLSTM) back down to hidden_size.
        self._projection = nn.Sequential(nn.Linear(8 * self.hidden_size,
                                                   self.hidden_size),
                                         nn.ReLU())
        self._composition = Seq2SeqEncoder(nn.LSTM,
                                           self.hidden_size,
                                           self.hidden_size,
                                           bidirectional=True)
        self._classification = nn.Sequential(nn.Dropout(p=self.dropout),
                                             nn.Linear(8 * self.hidden_size,
                                                       self.hidden_size),
                                             nn.Tanh(),
                                             nn.Dropout(p=self.dropout),
                                             nn.Linear(self.hidden_size,
                                                       self.num_classes))

        self.apply(_init_ssea_weights)

    def forward(self,
                premises,
                premises_adjacency_list,
                premises_lengths,
                premises_tree_size,
                hypotheses,
                hypotheses_adjacency_list,
                hypotheses_lengths,
                hypotheses_tree_size):
        """
        Args:
            premises: A batch of variable length sequences of word indices
                representing premises. The batch is assumed to be of size
                (batch, premises_length).
            premises_adjacency_list: Tree structure for the premises,
                forwarded to the peer-supplied ``treeencode`` hook.
            premises_lengths: A 1D tensor containing the lengths of the
                premises in 'premises'.
            premises_tree_size: Per-premise tree sizes, used as the lengths
                argument of the composition encoder.
            hypotheses: A batch of variable length sequences of word indices
                representing hypotheses. The batch is assumed to be of size
                (batch, hypotheses_length).
            hypotheses_adjacency_list: Tree structure for the hypotheses,
                forwarded to the peer-supplied ``treeencode`` hook.
            hypotheses_lengths: A 1D tensor containing the lengths of the
                hypotheses in 'hypotheses'.
            hypotheses_tree_size: Per-hypothesis tree sizes, used as the
                lengths argument of the composition encoder.

        Returns:
            logits: A tensor of size (batch, num_classes) containing the
                logits for each output class of the model.
            probabilities: A tensor of size (batch, num_classes) containing
                the probabilities of each output class in the model.
        """
        premises_mask = get_mask(premises, premises_lengths).to(self.device)
        hypotheses_mask = get_mask(hypotheses, hypotheses_lengths)\
            .to(self.device)

        embedded_premises = self._word_embedding(premises)
        embedded_hypotheses = self._word_embedding(hypotheses)
        if self.dropout:
            embedded_premises = self._rnn_dropout(embedded_premises)
            embedded_hypotheses = self._rnn_dropout(embedded_hypotheses)

        # Sequential (BiLSTM) encodings.  The original code encoded the
        # hypotheses twice with identical arguments; encode once.
        encoded_premises = self._encoding(embedded_premises,
                                          premises_lengths, self.device)
        encoded_hypotheses = self._encoding(embedded_hypotheses,
                                            hypotheses_lengths, self.device)

        # Tree encodings (treeencode is a peer-supplied hook; it must return
        # tensors broadcastable with the BiLSTM encodings).
        p_h_encoded = self._rnn_dropout(treeencode(premises_adjacency_list))
        h_h_encoded = self._rnn_dropout(treeencode(hypotheses_adjacency_list))

        # The fused sequence+tree encodings are used only for the similarity
        # score; the raw sequence encodings feed the attention sums below.
        encoded_premises_1 = encoded_premises + p_h_encoded
        encoded_hypotheses_1 = encoded_hypotheses + h_h_encoded

        # Similarity = dot-product term minus masked Euclidean-distance term.
        sim_cos = encoded_premises_1.bmm(encoded_hypotheses_1.transpose(2, 1))
        sim_dis = EuclideanDistances(encoded_premises, encoded_hypotheses)
        premises_mask_sim = premises_mask.unsqueeze(-1)\
            .expand_as(sim_dis).contiguous().float()
        hypotheses_mask_sim = hypotheses_mask.unsqueeze(1)\
            .expand_as(sim_dis).contiguous().float()
        sim_dis = torch.mul(sim_dis, premises_mask_sim)
        sim_dis = torch.mul(sim_dis, hypotheses_mask_sim)
        sim_E_N = sim_cos - sim_dis

        prem_hyp_attn = masked_softmax_coda_attention(sim_E_N, hypotheses_mask)
        hyp_prem_attn = masked_softmax_coda_attention(
            sim_E_N.transpose(1, 2).contiguous(), premises_mask)

        attended_premises = weighted_sum(encoded_hypotheses,
                                         prem_hyp_attn,
                                         premises_mask)
        attended_hypotheses = weighted_sum(encoded_premises,
                                           hyp_prem_attn,
                                           hypotheses_mask)

        enhanced_premises = torch.cat([encoded_premises,
                                       attended_premises,
                                       encoded_premises - attended_premises,
                                       encoded_premises * attended_premises],
                                      dim=-1)
        enhanced_hypotheses = torch.cat([encoded_hypotheses,
                                         attended_hypotheses,
                                         encoded_hypotheses -
                                         attended_hypotheses,
                                         encoded_hypotheses *
                                         attended_hypotheses],
                                        dim=-1)

        projected_premises = self._projection(enhanced_premises)
        projected_hypotheses = self._projection(enhanced_hypotheses)
        if self.dropout:
            projected_premises = self._rnn_dropout(projected_premises)
            projected_hypotheses = self._rnn_dropout(projected_hypotheses)

        # NOTE(review): the composition lengths are the tree sizes rather
        # than the token lengths — presumably intentional; confirm against
        # the data pipeline.
        v_ai = self._composition(projected_premises,
                                 premises_tree_size, self.device)
        v_bj = self._composition(projected_hypotheses,
                                 hypotheses_tree_size, self.device)

        # Self-attentive pooling (softmax over time, masked) + max pooling.
        v_ai_2 = torch.mul(v_ai, torch.softmax(v_ai, dim=1))
        v_bj_2 = torch.mul(v_bj, torch.softmax(v_bj, dim=1))
        v_ai_2 = torch.sum(v_ai_2 * premises_mask.unsqueeze(1)
                                                 .transpose(2, 1), dim=1)
        v_bj_2 = torch.sum(v_bj_2 * hypotheses_mask.unsqueeze(1)
                                                   .transpose(2, 1), dim=1)
        v_a_max, _ = replace_masked(v_ai, premises_mask, -1e7).max(dim=1)
        v_b_max, _ = replace_masked(v_bj, hypotheses_mask, -1e7).max(dim=1)

        v = torch.cat([v_a_max, v_b_max, v_ai_2, v_bj_2], dim=1)
        logits = self._classification(v)
        probabilities = torch.softmax(logits, dim=-1)
        return logits, probabilities
def EuclideanDistances(a,b):
    '''Pairwise Euclidean-distance hook used by SSEA.forward.

    Placeholder that implicitly returns None; peers can implement their own
    according to their needs. NOTE(review): SSEA.forward multiplies the
    result element-wise with masks expanded via ``expand_as``, so an
    implementation presumably returns a (batch, premise_len, hypothesis_len)
    tensor matching the bmm similarity — confirm when implementing.
    '''
def treeencode(a):
    '''Tree-encoding hook used by SSEA.forward.

    Placeholder that implicitly returns None; peers can implement their own
    according to their needs. NOTE(review): SSEA.forward adds the result to
    the BiLSTM encodings and passes it through dropout, so an implementation
    presumably returns a tensor shaped like the (batch, seq_len, 2*hidden)
    sequence encodings — confirm when implementing.
    '''

def _init_ssea_weights(module):
    """
    Initialise the weights of the ssea model.
    """
    if isinstance(module, nn.Linear):
        nn.init.xavier_uniform_(module.weight.data)
        # nn.init.constant_(module.bias.data, 0.0)

    elif isinstance(module, nn.LSTM):
        nn.init.xavier_uniform_(module.weight_ih_l0.data)
        nn.init.orthogonal_(module.weight_hh_l0.data)
        nn.init.constant_(module.bias_ih_l0.data, 0.0)
        nn.init.constant_(module.bias_hh_l0.data, 0.0)
        hidden_size = module.bias_hh_l0.data.shape[0] // 4
        module.bias_hh_l0.data[hidden_size:(2*hidden_size)] = 1.0

        if (module.bidirectional):
            nn.init.xavier_uniform_(module.weight_ih_l0_reverse.data)
            nn.init.orthogonal_(module.weight_hh_l0_reverse.data)
            nn.init.constant_(module.bias_ih_l0_reverse.data, 0.0)
            nn.init.constant_(module.bias_hh_l0_reverse.data, 0.0)
            module.bias_hh_l0_reverse.data[hidden_size:(2*hidden_size)] = 1.0
