from functools import partial
from typing import Optional
import torch
import torch.nn as nn

class ConcatHandshaking(nn.Module):
    """Handshaking block that pairs tokens by concatenation.

    For every ordered token pair (i, j) with j >= i, the hidden states of
    tokens i and j — plus, optionally, an inner-span context encoding —
    are concatenated and projected back down to ``hidden_size``.
    """

    def __init__(self, hidden_size, inner_encoder_type: Optional[str] = None, **kwargs):
        """
        Args:
            hidden_size: dimensionality of the incoming token hiddens.
            inner_encoder_type: name of the optional inner-span encoder
                (see ``InnerEncodeLayer``); ``None`` disables it.
        """
        super(ConcatHandshaking, self).__init__()
        self._hidden_size = hidden_size
        self._inner_encoder_type = inner_encoder_type

        # Optionally add an inner-span encoding layer whose output is
        # concatenated alongside the two token vectors.
        if isinstance(self._inner_encoder_type, str):
            self.inner_encoder = InnerEncodeLayer(self._hidden_size, self._inner_encoder_type)
            # Two token vectors plus a context vector that is doubled for
            # bidirectional encoders.
            n_parts = 2 + (2 if "bi" in self._inner_encoder_type else 1)
            self.linear_projector = nn.Linear(self._hidden_size * n_parts, self._hidden_size)
        else:
            self.linear_projector = nn.Linear(self._hidden_size * 2, self._hidden_size)

    def forward(self, hiddens, **kwargs):
        """Handshaking with concatenation.

        Args:
            hiddens: Tensor, shape (batch_size, seq_len, hidden_size)

        Returns:
            handshaking_hiddens: Tensor, shape
                (batch_size, seq_len + (seq_len-1) + ... + 1, hidden_size)
        """
        seq_len = hiddens.size(1)
        out_chunks = []
        for start in range(seq_len):
            anchor = hiddens[:, start, :]
            span = hiddens[:, start:, :]
            # Tile the anchor token so it lines up with every visible token.
            tiled_anchor = anchor.unsqueeze(1).repeat(1, seq_len - start, 1)

            parts = [tiled_anchor, span]
            if self._inner_encoder_type is not None:
                parts.append(self.inner_encoder(span))

            projected = self.linear_projector(torch.cat(parts, dim=2))
            out_chunks.append(torch.tanh(projected))
        return torch.cat(out_chunks, dim=1)


from .general import ConditionalLayerNorm
class ClnHandshaking(nn.Module):
    """Handshaking block based on Conditional Layer Normalization (CLN).

    For every ordered token pair (i, j) with j >= i, the hidden state of
    token j is layer-normalized conditioned on the hidden state of token i
    (and, optionally, on an inner-span context encoding).
    """

    def __init__(self, hidden_size, inner_encoder_type: Optional[str] = None, **kwargs):
        """
        Args:
            hidden_size: dimensionality of the incoming token hiddens.
            inner_encoder_type: name of the optional inner-span encoder
                (see ``InnerEncodeLayer``); ``None`` disables it.
        """
        super(ClnHandshaking, self).__init__()
        self._hidden_size = hidden_size
        self._inner_encoder_type = inner_encoder_type

        self.shaking_cln = ConditionalLayerNorm(self._hidden_size, self._hidden_size)

        # Optionally add an inner-span encoding layer.
        if isinstance(self._inner_encoder_type, str):
            self.inner_encoder = InnerEncodeLayer(self._hidden_size, self._inner_encoder_type)
            # Bidirectional encoders double the conditioning dimension.
            encoder_out_dim = self._hidden_size * (2 if "bi" in self._inner_encoder_type else 1)
            self.inner_context_cln = ConditionalLayerNorm(self._hidden_size, encoder_out_dim)

    def forward(self, hiddens, **kwargs):
        """Handshaking with CLN.

        Args:
            hiddens: Tensor, shape (batch_size, seq_len, hidden_size)

        Returns:
            handshaking_hiddens: Tensor, shape
                (batch_size, seq_len + (seq_len-1) + ... + 1, hidden_size)
        """
        # NOTE: **kwargs added for signature consistency with
        # ConcatHandshaking.forward, so the two classes are interchangeable
        # for callers that pass extra keyword arguments.
        seq_len = hiddens.size(1)
        handshaking_hiddens_list = []
        for i in range(seq_len):
            anchor = hiddens[:, i, :]
            # Tile token i across every token it can "see" (j >= i).
            repeat_hiddens = anchor[:, None, :].repeat(1, seq_len - i, 1)
            visible_hiddens = hiddens[:, i:, :]

            shaking_hiddens = self.shaking_cln(visible_hiddens, repeat_hiddens)
            if self._inner_encoder_type is not None:
                inner_context = self.inner_encoder(visible_hiddens)
                shaking_hiddens = self.inner_context_cln(shaking_hiddens, inner_context)

            handshaking_hiddens_list.append(shaking_hiddens)
        return torch.cat(handshaking_hiddens_list, dim=1)


class InnerEncodeLayer(nn.Module):
    """Encodes a visible span of token hiddens into per-position context.

    Supported ``encoder_type`` values: any string containing ``"lstm"``
    (optionally with ``"bi"`` for a bidirectional LSTM), or one of the
    pooling names ``"max_pooling"``, ``"mean_pooling"``, ``"mix_pooling"``.
    """

    def __init__(self, hidden_size, encoder_type="lstm") -> None:
        """
        Args:
            hidden_size: dimensionality of the incoming token hiddens.
            encoder_type: which encoder to build (see class docstring).
        """
        super(InnerEncodeLayer, self).__init__()
        self._hidden_size = hidden_size
        self._encoder_type = encoder_type
        self._str2func = {
            "max_pooling": self.max_pooling,
            "mean_pooling": self.mean_pooling,
        }

        if "mix" in self._encoder_type:
            # Learnable per-dimension interpolation weight between
            # max-pooling and mean-pooling.
            self._theta = nn.parameter.Parameter(torch.rand(self._hidden_size))
            self._str2func["mix_pooling"] = partial(self.mix_pooling, theta=self._theta)

        if "lstm" in self._encoder_type:
            self.lstm_encoder = nn.LSTM(self._hidden_size, self._hidden_size, num_layers=1,
                                        bidirectional="bi" in self._encoder_type, batch_first=True)

        if "pooling" in self._encoder_type:
            self.pooling_encoder = self._str2func[self._encoder_type]

    def forward(self, hiddens):
        """Encode the span.

        Args:
            hiddens: Tensor, shape (batch_size, span_len, hidden_size)

        Returns:
            Tensor, shape (batch_size, span_len, hidden_size) — or twice the
            last dimension for a bidirectional LSTM.

        Raises:
            ValueError: if ``encoder_type`` is neither lstm- nor pooling-based.
        """
        if "lstm" in self._encoder_type:
            encoded_context, _ = self.lstm_encoder(hiddens)
        elif "pooling" in self._encoder_type:
            # Cumulative pooling: position i summarizes hiddens[:, :i+1, :].
            encoded_context = torch.stack(
                [self.pooling_encoder(hiddens[:, :i + 1, :]) for i in range(hiddens.size(1))],
                dim=1,
            )
        else:
            # Previously this path fell through to an UnboundLocalError;
            # fail loudly with the offending configuration instead.
            raise ValueError(f"unsupported encoder_type: {self._encoder_type!r}")
        return encoded_context

    @staticmethod
    def max_pooling(inputs):
        return torch.max(inputs, dim=1)[0]

    @staticmethod
    def mean_pooling(inputs):
        return torch.mean(inputs, dim=1)

    @staticmethod
    def mix_pooling(inputs, theta):
        return theta * torch.max(inputs, dim=1)[0] + (1 - theta) * torch.mean(inputs, dim=1)