from turtle import shape
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional, Union
import math
from mixture_of_experts import MoE
from mixture_of_experts import HeirarchicalMoE
from soft_moe_pytorch import SoftMoE
from soft_moe_pytorch import DynamicSlotsSoftMoE
from st_moe_pytorch import SparseMoEBlock
from st_moe_pytorch import MoE as StMoE


class Transformer(nn.Module):
    """Transformer model from Attention is All You Need.

    A classic transformer model adapted for sequential data.
    Embedding has been replaced with a fully connected layer,
    the last layer softmax is now a tanh.

    Attributes:
    layers_encoding: :py:class: 'list' of :class: 'Encoder.Encoder'
        stack of Encoder layers.
    layers_decoding: :py:class: 'list' of :class:'Decoder.Decoder'
        stack of Decoder layers.

    Parameters:
    d_input:
        Model input dimension.
    d_model:
        Dimension of the input vector.
    d_output:
        Model output dimension.
    q:
        Dimension of queries and keys.
    v:
        Dimension of values.
    h:
        Number of heads.
    N:
        Number of encoder and decoder layers to stack.
    attention_size:
        Number of backward elements to apply attention.
        Deactivated if 'None'. Default is 'None'.
    dropout:
        Dropout probability after each MHA or PFF block.
        Default is '0.2'.
    chunk_mode:
        Switch between different MultiHeadAttention blocks.
        One of 'chunk', 'window' or 'None'. Default is 'chunk'.
    pe:
        Type of positional encoding to add.
        Must be one of 'original', 'regular' or None. Default is None.
    pe_period:
        If using the 'regular' pe, then we can define the period. Default is '24'.
    """

    def __init__(self,
                 d_input: int,
                 d_model: int,
                 d_output: int,
                 q: int,
                 v: int,
                 h: int,
                 N: int,
                 attention_size: Optional[int] = None,
                 dropout: float = 0.2,
                 chunk_mode: str = 'chunk',
                 pe: Optional[str] = None,
                 pe_period: int = 24):
        """Create transformer structure from Encoder and Decoder blocks."""
        super().__init__()

        self._d_model = d_model

        self.layers_encoding = nn.ModuleList([Encoder(d_model,
                                                      q,
                                                      v,
                                                      h,
                                                      attention_size=attention_size,
                                                      dropout=dropout,
                                                      chunk_mode=chunk_mode) for _ in range(N)]
                                             )
        self.layers_decoding = nn.ModuleList([Decoder(d_model,
                                                      q,
                                                      v,
                                                      h,
                                                      attention_size=attention_size,
                                                      dropout=dropout,
                                                      chunk_mode=chunk_mode) for _ in range(N)]
                                             )

        # NOTE(review): _layerNorm1 and _linear2 are never used in forward();
        # kept so existing checkpoints continue to load.
        self._layerNorm1 = nn.LayerNorm(d_input)

        # Input dims connect to output dims through d_model.
        self._embedding = nn.Linear(d_input, d_model)
        self._linear = nn.Linear(d_model, d_output)
        self._linear2 = nn.Linear(d_model, d_input)

        pe_functions = {
            'original': generate_original_PE,
            'regular': generate_regular_PE,
        }

        if pe in pe_functions.keys():
            self._generate_PE = pe_functions[pe]
            self._pe_period = pe_period
        elif pe is None:
            self._generate_PE = None
            self._pe_period = None
        else:
            raise NameError(
                f'PE "{pe}" not understood. Must be one of {", ".join(pe_functions.keys())} or None.')

        self.name = 'TransMoE'

    def _positional_encoding(self, length: int, device: torch.device) -> torch.Tensor:
        """Build the configured positional encoding on the given device.

        Passes the configured period consistently (the old code only
        forwarded it on the encoder side).
        """
        pe_params = {'period': self._pe_period} if self._pe_period else {}
        return self._generate_PE(length, self._d_model, **pe_params).to(device)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Propagate input through transformer.

        Forward input through an embedding module,
        the encoder then decoder stacks, and an output module.

        Parameters:
        x:
            :class: 'torch.Tensor' of shape (batch_size, K, d_input).

        Returns:
            Output tensor with shape (batch_size, K, d_output).
        """
        # Time-window length K.
        K = x.shape[1]

        # Embedding module: linear transformation to d_model.
        encoding = self._embedding(x)

        # Add position encoding (out-of-place: the old in-place add_ also
        # worked, but kept the style consistent with the decoder-side fix).
        if self._generate_PE is not None:
            encoding = encoding + self._positional_encoding(K, encoding.device)

        # Encoding stack.
        for layer in self.layers_encoding:
            encoding = layer(encoding)

        # Decoder input starts from the encoder output. The PE add MUST be
        # out-of-place here: `decoding` aliases `encoding`, and the previous
        # `decoding.add_(...)` also mutated the encoder memory passed to the
        # decoder's cross-attention below.
        decoding = encoding
        if self._generate_PE is not None:
            decoding = decoding + self._positional_encoding(K, decoding.device)

        for layer in self.layers_decoding:
            decoding = layer(decoding, encoding)

        # Output module; tanh bounds the output to (-1, 1).
        output = self._linear(decoding)
        output = torch.tanh(output)
        return output


# Encoder class
class Encoder(nn.Module):
    """Encoder block from Attention is All You Need.

    Apply Multi Head Attention block followed by a Point-wise Feed Forward block.
    Residual sum and normalization are applied at each step. Several
    Mixture-of-Experts variants are instantiated for experimentation; only
    the attention + feed-forward path is active in forward().

    Parameters:
    d_model:
        Dimension of the input vector.
    q:
        Dimension of all query matrix.
    v:
        Dimension of all value matrix.
    h:
        Number of heads.
    attention_size:
        Number of backward elements to apply attention.
        Deactivated if 'None'. Default is 'None'.
    dropout:
        Dropout probability after each MHA or PFF block.
        Default is '0.2'.
    chunk_mode:
        Switch between different MultiHeadAttention blocks.
        One of 'chunk', 'window' or 'None'. Default is 'chunk'.
    """

    def __init__(self,
                 d_model: int,
                 q: int,
                 v: int,
                 h: int,
                 attention_size: Optional[int] = None,
                 dropout: float = 0.2,
                 chunk_mode: str = 'chunk'):
        """Initialize the Encoder block."""
        super().__init__()

        chunk_mode_modules = {
            'chunk': MultiHeadAttentionChunk,
            'window': MultiHeadAttentionWindow,
        }

        if chunk_mode in chunk_mode_modules:
            MHA = chunk_mode_modules[chunk_mode]
        elif chunk_mode is None:
            MHA = MultiHeadAttention
        else:
            raise NameError(
                f'chunk_mode "{chunk_mode}" not understood. Must be one of {", ".join(chunk_mode_modules.keys())} or None.')

        self._selfAttention = MHA(d_model, q, v, h, attention_size=attention_size)
        self._feedForward = PositionwiseFeedForward(d_model)

        self._layerNorm1 = nn.LayerNorm(d_model)
        self._layerNorm2 = nn.LayerNorm(d_model)
        # Norms 3/4 belong to the (currently disabled) MoE paths below.
        self._layerNorm3 = nn.LayerNorm(d_model)
        self._layerNorm4 = nn.LayerNorm(d_model)

        self._dopout = nn.Dropout(p=dropout)

        ## 20231216 MoE layers (experimental; unused in forward)
        self._moe = HeirarchicalMoE(dim=d_model, num_experts=(4, 4))
        self._feedForward2 = PositionwiseFeedForward(d_model)

        ## 20231218 SoftMoE (experimental; unused in forward).
        # dim was hard-coded to 512, which breaks whenever d_model != 512;
        # it now follows d_model.
        # NOTE(review): seq_len=128 must match the actual K — confirm against caller.
        self._softmoe = SoftMoE(dim=d_model,
                                seq_len=128,
                                num_experts=6
                                )
        ## DynamicSlotsSoftMoE variant (no fixed seq_len required).
        self._dynamicslots_softmoe = DynamicSlotsSoftMoE(dim=d_model,
                                                         num_experts=4,
                                                         geglu=True
                                                         )

        ## 20231219 st_moe_pytorch SparseMoEBlock (experimental; unused in forward)
        self._st_moe = StMoE(
            dim=d_model,
            num_experts=8,       # more experts adds parameters, not computation
            gating_top_n=2,      # top-2 gating (3 tested in the paper with a lower threshold)
            threshold_train=0.2, # routing threshold for the 2nd expert and beyond;
                                 # 0.2 was optimal for 2-expert routing
            threshold_eval=0.2,
            capacity_factor_train=1.25,  # slack capacity in case gating is imbalanced
            capacity_factor_eval=2.,     # capacity_factor_* should be >= 1
            balance_loss_coef=1e-2,      # weight of the expert-balancing auxiliary loss
            router_z_loss_coef=1e-3,     # weight of the router z-loss
            )

        self._moe_block = SparseMoEBlock(self._st_moe,
                                         add_ff_before=True,
                                         add_ff_after=True
                                         )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Propagate the input through the Encoder block.

        Apply the Multi Head Attention block, add residual and normalize.
        Apply the Point-wise Feed Forward block, add residual and normalize.

        Parameters:
        x:
            Input tensor with shape (batch_size, K, d_model).
        Returns:
            Output tensor with shape (batch_size, K, d_model).
        """
        # Self attention + residual + norm.
        residual = x
        x = self._selfAttention(query=x, key=x, value=x)
        x = self._dopout(x)
        x = self._layerNorm1(x + residual)

        # Feed forward + residual + norm.
        residual = x
        x = self._feedForward(x)
        x = self._dopout(x)
        x = self._layerNorm2(x + residual)

        # Disabled experiments (see the 2023-12 notes in __init__):
        #   x, aux_loss = self._moe(x)                    # + dropout/norm3, then _feedForward2 + norm4
        #   x = self._softmoe(x) + x
        #   x = self._dynamicslots_softmoe(x) + x
        #   x, total_aux_loss, balance_loss, router_z_loss = self._moe_block(x)

        return x

    @property
    def attention_map(self) -> torch.Tensor:
        """Attention map after a forward propagation, variable 'score' in the original paper."""
        return self._selfAttention.attention_map


class MultiHeadAttention(nn.Module):
    """Multi Head Attention block from Attention is All You Need.

    Consumes three tensors of shape (batch_size, K, d_model) — the sources
    for queries, keys and values — and returns a self attention tensor of
    shape (batch_size, K, d_model). A residual convolutional stack refines
    the value input before attention is computed.

    Parameters:
    d_model:
        Dimension of the input vector.
    q:
        Dimension of all query matrix.
    v:
        Dimension of all value matrix.
    h:
        Number of heads.
    attention_size:
        Number of backward elements to apply attention.
        Deactivated if 'None'. Default is 'None'.
    """

    def __init__(self,
                 d_model: int,
                 q: int,
                 v: int,
                 h: int,
                 attention_size: int = None):
        """Initialize the Multi Head Block."""
        super().__init__()

        self._h = h
        self._attention_size = attention_size

        # Per-head query/key/value projections, fused into single linear
        # layers of width q*h (or v*h).
        self._W_q = nn.Linear(d_model, q * self._h)
        self._W_k = nn.Linear(d_model, q * self._h)
        self._W_v = nn.Linear(d_model, v * self._h)

        # Final projection back to the model dimension.
        self._W_o = nn.Linear(self._h * v, d_model)

        # Last computed attention scores, exposed via `attention_map`.
        self._scores = None

        ## 20231208: length-preserving conv stack (padding = kernel_size // 2)
        ## applied to the value input before attention.
        self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=128, kernel_size=5, stride=1, padding=5 // 2)
        self.conv2 = nn.Conv1d(in_channels=128, out_channels=128, kernel_size=5, stride=1, padding=5 // 2)
        self.conv3 = nn.Conv1d(in_channels=128, out_channels=d_model, kernel_size=5, stride=1, padding=5 // 2)
        self.activation = nn.LeakyReLU(0.2)  # nn.GELU() is an alternative
        self._layerNorm1 = nn.LayerNorm(d_model)

    def forward(self,
                query: torch.Tensor,
                key: torch.Tensor,
                value: torch.Tensor,
                mask: Optional[str] = None) -> torch.Tensor:
        """Propagate forward the input through the MHB.

        Queries, keys and values are computed per head, followed by the
        Scaled Dot-Product; head outputs are concatenated and projected
        back to (batch_size, K, d_model).

        Parameters:
        query:
            Input tensor with shape (batch_size, K, d_model) used to compute queries.
        key:
            Input tensor with shape (batch_size, K, d_model) used to compute keys.
        value:
            Input tensor with shape (batch_size, K, d_model) used to compute values.
        mask:
            Mask to apply on scores before computing attention.
            One of 'subsequent', None. Default is None.

        Returns:
            Self attention tensor with shape (batch_size, K, d_model).
        """
        ## 20231208: residual convolutional refinement of the value input
        ## (conv layers want (batch, channels, K), hence the transposes).
        conv_residual = value
        features = self.activation(self.conv1(value.transpose(1, 2)))
        features = self.activation(self.conv2(features))
        features = self.activation(self.conv3(features))
        value = self._layerNorm1(features.transpose(1, 2) + conv_residual)

        seq_len = query.shape[1]

        # Project, then stack the h heads along the batch dimension.
        queries = torch.cat(self._W_q(query).chunk(self._h, dim=-1), dim=0)
        keys = torch.cat(self._W_k(key).chunk(self._h, dim=-1), dim=0)
        values = torch.cat(self._W_v(value).chunk(self._h, dim=-1), dim=0)

        # Scaled dot product. NOTE(review): scaled by sqrt(K) (the sequence
        # length), not sqrt(d_k) as in the original paper — kept as-is.
        self._scores = torch.bmm(queries, keys.transpose(1, 2)) / np.sqrt(seq_len)

        # Restrict attention to a local backward window, if configured.
        if self._attention_size is not None:
            local_mask = generate_local_map_mask(seq_len, self._attention_size, mask_future=False, device=self._scores.device)
            self._scores = self._scores.masked_fill(local_mask, float('-inf'))

        # Optionally hide future positions from each query.
        if mask == "subsequent":
            future_mask = torch.triu(torch.ones((seq_len, seq_len)), diagonal=1).bool()
            self._scores = self._scores.masked_fill(future_mask.to(self._scores.device), float('-inf'))

        # Normalize scores into attention weights.
        self._scores = F.softmax(self._scores, dim=-1)

        attention = torch.bmm(self._scores, values)

        # Undo the head stacking and project back to d_model.
        attention_heads = torch.cat(attention.chunk(self._h, dim=0), dim=-1)
        return self._W_o(attention_heads)

    @property
    def attention_map(self) -> torch.Tensor:
        """Attention scores from the most recent forward pass."""
        if self._scores is None:
            raise RuntimeError(
                "Evaluate the model once to generate attention map")
        return self._scores


class MultiHeadAttentionChunk(MultiHeadAttention):
    """Multi Head Attention block with chunk.

    Given 3 inputs of shape (batch_size, K, d_model), that will be used
    to compute query, keys and values, we output a self attention
    tensor of shape (batch_size, K, d_model).
    Queries, keys and values are divided in chunks of constant size.

    NOTE(review): `n_chunk = K // chunk_size` and the cat/chunk round-trip
    below assume K is an exact multiple of chunk_size — confirm against
    callers; otherwise head grouping misaligns.

    Parameters:
    d_model:
        Dimension of the input vector.
    q:
        Dimension of all query matrix.
    v:
        Dimension of all value matrix.
    h:
        Number of heads.
    attention_size:
        Number of backward elements to apply attention.
        Deactivated if ``None``. Default is ``None``.
    chunk_size:
        Size of chunks to apply attention on. Last one may be smaller (see :class:`torch.Tensor.chunk`).
        Default is 11.
    """

    def __init__(self,
                 d_model: int,
                 q: int,
                 v: int,
                 h: int,
                 attention_size: int = None,
                 chunk_size: Optional[int] = 11,
                 **kwargs):
        """Initialize the Multi Head Block."""
        super().__init__(d_model, q, v, h, attention_size, **kwargs)

        self._chunk_size = chunk_size

        # Future (upper-triangular) mask over one chunk, for decoder use.
        # Stored as a frozen nn.Parameter so it follows the module's device.
        self._future_mask = nn.Parameter(torch.triu(torch.ones((self._chunk_size, self._chunk_size)), diagonal=1).bool(),
                                         requires_grad=False)

        if self._attention_size is not None:
            # Precomputed local band mask, sized for a single chunk.
            self._attention_mask = nn.Parameter(generate_local_map_mask(self._chunk_size, self._attention_size),
                                                requires_grad=False)

    def forward(self,
                query: torch.Tensor,
                key: torch.Tensor,
                value: torch.Tensor,
                mask: Optional[str] = None) -> torch.Tensor:
        """Propagate forward the input through the MHB.

        We compute for each head the queries, keys and values matrices,
        followed by the Scaled Dot-Product. The result is concatenated 
        and returned with shape (batch_size, K, d_model).

        Parameters:
        query:
            Input tensor with shape (batch_size, K, d_model) used to compute queries.
        key:
            Input tensor with shape (batch_size, K, d_model) used to compute keys.
        value:
            Input tensor with shape (batch_size, K, d_model) used to compute values.
        mask:
            Mask to apply on scores before computing attention.
            One of 'subsequent', None. Default is None.

        Returns:
            Self attention tensor with shape (batch_size, K, d_model).
        """
        K = query.shape[1]
        n_chunk = K // self._chunk_size

        # Compute Q, K and V: stack heads on the batch dimension, then stack
        # time-chunks on the batch dimension too, so each (head, chunk) pair
        # attends independently within its chunk.
        queries = torch.cat(torch.cat(self._W_q(query).chunk(self._h, dim=-1), dim=0).chunk(n_chunk, dim=1), dim=0)
        keys = torch.cat(torch.cat(self._W_k(key).chunk(self._h, dim=-1), dim=0).chunk(n_chunk, dim=1), dim=0)
        values = torch.cat(torch.cat(self._W_v(value).chunk(self._h, dim=-1), dim=0).chunk(n_chunk, dim=1), dim=0)

        # Scaled Dot Product (scaled by sqrt(chunk_size), the per-chunk length)
        self._scores = torch.bmm(queries, keys.transpose(1, 2)) / np.sqrt(self._chunk_size)

        # Compute local map mask
        if self._attention_size is not None:
            self._scores = self._scores.masked_fill(self._attention_mask, float('-inf'))

        # Compute future mask
        if mask == "subsequent":
            self._scores = self._scores.masked_fill(self._future_mask, float('-inf'))

        # Apply softmax
        self._scores = F.softmax(self._scores, dim=-1)

        attention = torch.bmm(self._scores, values)

        # Undo the chunk stacking, then the head stacking (reverse of above).
        attention_heads = torch.cat(torch.cat(attention.chunk(
            n_chunk, dim=0), dim=1).chunk(self._h, dim=0), dim=-1)

        # Apply linear transformation W^O
        self_attention = self._W_o(attention_heads)

        return self_attention


class MultiHeadAttentionWindow(MultiHeadAttention):
    """Multi Head Attention block with moving window.

    Given 3 inputs of shape (batch_size, K, d_model), that will be used
    to compute query, keys and values, we output a self attention
    tensor of shape (batch_size, K, d_model).
    Queries, keys and values are divided in chunks using a moving window.

    Parameters:
    d_model:
        Dimension of the input vector.
    q:
        Dimension of all query matrix.
    v:
        Dimension of all value matrix.
    h:
        Number of heads.
    attention_size:
        Number of backward elements to apply attention.
        Deactivated if ``None``. Default is ``None``.
    window_size:
        Size of the window used to extract chunks.
        Default is 12.
    padding:
        Padding around each window. Padding will be applied to input sequence.
        Default is 12 // 4 = 3.
    """

    def __init__(self,
                 d_model: int,
                 q: int,
                 v: int,
                 h: int,
                 attention_size: int = None,
                 window_size: Optional[int] = 12,
                 padding: Optional[int] = 12 // 4,
                 **kwargs):
        """Initialize the Multi Head Block."""
        super().__init__(d_model, q, v, h, attention_size, **kwargs)

        self._window_size = window_size
        self._padding = padding
        self._q = q
        self._v = v

        # Step size for the moving window: consecutive windows overlap by
        # 2 * padding positions, so the un-padded centers tile the sequence.
        self._step = self._window_size - 2 * self._padding

        # Future (upper-triangular) mask over one window, for decoder use.
        # Stored as a frozen nn.Parameter so it follows the module's device.
        self._future_mask = nn.Parameter(torch.triu(torch.ones((self._window_size, self._window_size)), diagonal=1).bool(),
                                         requires_grad=False)

        if self._attention_size is not None:
            # Precomputed local band mask, sized for a single window.
            self._attention_mask = nn.Parameter(generate_local_map_mask(self._window_size, self._attention_size),
                                                requires_grad=False)

    def forward(self,
                query: torch.Tensor,
                key: torch.Tensor,
                value: torch.Tensor,
                mask: Optional[str] = None) -> torch.Tensor:
        """Propagate forward the input through the MHB.

        We compute for each head the queries, keys and values matrices,
        followed by the Scaled Dot-Product. The result is concatenated 
        and returned with shape (batch_size, K, d_model).

        NOTE(review): the window tiling assumes (K + 2*padding) unfolds
        evenly with this window_size/step — confirm K against the caller.

        Parameters:
        query:
            Input tensor with shape (batch_size, K, d_model) used to compute queries.
        key:
            Input tensor with shape (batch_size, K, d_model) used to compute keys.
        value:
            Input tensor with shape (batch_size, K, d_model) used to compute values.
        mask:
            Mask to apply on scores before computing attention.
            One of ``'subsequent'``, None. Default is None.

        Returns:
            Self attention tensor with shape (batch_size, K, d_model).
        """
        batch_size = query.shape[0]

        # Pad the sequence on both ends (edge replication) so windows at the
        # boundaries still have full context.
        query = F.pad(query.transpose(1, 2), (self._padding, self._padding), 'replicate').transpose(1, 2)
        key = F.pad(key.transpose(1, 2), (self._padding, self._padding), 'replicate').transpose(1, 2)
        value = F.pad(value.transpose(1, 2), (self._padding, self._padding), 'replicate').transpose(1, 2)

        # Compute Q, K and V, concatenate heads on batch dimension
        queries = torch.cat(self._W_q(query).chunk(self._h, dim=-1), dim=0)
        keys = torch.cat(self._W_k(key).chunk(self._h, dim=-1), dim=0)
        values = torch.cat(self._W_v(value).chunk(self._h, dim=-1), dim=0)

        # Divide Q, K and V using a moving window; each window becomes an
        # independent attention problem stacked on the batch dimension.
        queries = queries.unfold(dimension=1, size=self._window_size, step=self._step).reshape((-1, self._q, self._window_size)).transpose(1, 2)
        keys = keys.unfold(dimension=1, size=self._window_size, step=self._step).reshape((-1, self._q, self._window_size)).transpose(1, 2)
        values = values.unfold(dimension=1, size=self._window_size, step=self._step).reshape((-1, self._v, self._window_size)).transpose(1, 2)

        # Scaled Dot Product (scaled by sqrt(window_size), the per-window length)
        self._scores = torch.bmm(queries, keys.transpose(1, 2)) / np.sqrt(self._window_size)

        # Compute local map mask
        if self._attention_size is not None:
            self._scores = self._scores.masked_fill(self._attention_mask, float('-inf'))

        # Compute future mask
        if mask == "subsequent":
            self._scores = self._scores.masked_fill(self._future_mask, float('-inf'))

        # Apply softmax
        self._scores = F.softmax(self._scores, dim=-1)

        attention = torch.bmm(self._scores, values)

        # Fold chunks back: drop each window's padded borders, then stitch
        # the window centers back into one sequence per (batch, head).
        attention = attention.reshape((batch_size*self._h, -1, self._window_size, self._v))
        attention = attention[:, :, self._padding:-self._padding, :]
        attention = attention.reshape((batch_size*self._h, -1, self._v))

        # Concatenat the heads
        attention_heads = torch.cat(attention.chunk(self._h, dim=0), dim=-1)

        # Apply linear transformation W^O
        self_attention = self._W_o(attention_heads)

        return self_attention


def generate_original_PE(length: int, d_model: int, period: Optional[int] = 24) -> torch.Tensor:
    """Generate the sinusoidal positional encoding from the original paper.

    PE[pos, 2i]   = sin(pos / 10000^(2i / d_model))
    PE[pos, 2i+1] = cos(pos / 10000^(2i / d_model))

    Fixes two deviations from the paper: the base was 1000 instead of
    10000, and the cos terms used a different exponent than their paired
    sin terms instead of sharing the same frequency.

    Parameters:
    length:
        Time window length, i.e. K.
    d_model:
        Dimension of the model vector.
    period:
        Unused; kept for signature compatibility with generate_regular_PE.

    Returns:
        Tensor of shape (K, d_model).
    """
    PE = torch.zeros((length, d_model))
    pos = torch.arange(length, dtype=torch.float32).unsqueeze(1)

    # 10000^(2i / d_model) for each even index 2i; the sin/cos of a pair
    # share this frequency.
    div = torch.pow(10000, torch.arange(0, d_model, 2, dtype=torch.float32) / d_model)
    PE[:, 0::2] = torch.sin(pos / div)
    # Slice div to d_model // 2 so odd d_model still works.
    PE[:, 1::2] = torch.cos(pos / div[: d_model // 2])

    return PE


def generate_regular_PE(length: int, d_model: int, period: Optional[int] = 24) -> torch.Tensor:
    """Generate positional encoding with a given period.

    Every model dimension carries the same sinusoid:
    PE[pos, :] = sin(2 * pi * pos / period).

    Parameters:
    length:
        Time window length, i.e. K.
    d_model:
        Dimension of the model vector.
    period:
        Size of the pattern to repeat. Default is 24.

    Returns:
        Tensor of shape (K, d_model).
    """
    pos = torch.arange(length, dtype=torch.float32).unsqueeze(1)

    # Single sinusoid of the requested period, broadcast to every dimension.
    # (The previous zeros-initialization was dead code, immediately overwritten.)
    return torch.sin(pos * 2 * np.pi / period).repeat((1, d_model))


def generate_local_map_mask(chunk_size: int,
                            attention_size: int,
                            mask_future=False,
                            device: torch.device = 'cpu') -> torch.BoolTensor:
    """Build a boolean attention mask restricting attention to a diagonal band.

    Entries marked True are the positions to EXCLUDE from attention.

    Parameters:
    chunk_size:
        Time dimension size.
    attention_size:
        Number of backward elements to apply attention.
    mask_future:
        When True, also mask every position ahead of the query.
    device:
        torch device. Default is 'cpu'.

    Returns:
        Mask as a boolean tensor of shape (chunk_size, chunk_size).
    """
    rows = np.arange(chunk_size).reshape(-1, 1)
    cols = np.arange(chunk_size).reshape(1, -1)

    if mask_future:
        # Mask anything in the future, or further back than attention_size.
        band = (rows - cols > attention_size) ^ (cols - rows > 0)
    else:
        # Mask anything outside the symmetric attention_size band.
        band = np.abs(rows - cols) > attention_size

    return torch.BoolTensor(band).to(device)


class Decoder(nn.Module):
    """Decoder block from Attention is All You Need.

    Apply two Multi Head Attention blocks followed by a Point-wise Feed
    Forward block, a sparse MoE block, and a final Point-wise Feed Forward
    block. Residual sum and normalization are applied at each step.

    Parameters:
    d_model:
        Dimension of the input vector.
    q:
        Dimension of all query matrix.
    v:
        Dimension of all value matrix.
    h:
        Number of heads.
    attention_size:
        Number of backward elements to apply attention.
        Deactivated if ``None``. Default is ``None``.
    dropout:
        Dropout probability after each MHA or PFF block.
        Default is '0.3'.
    chunk_mode:
        Switch between different MultiHeadAttention blocks.
        One of 'chunk', 'window' or 'None'. Default is 'chunk'.
    """

    def __init__(self,
                 d_model: int,
                 q: int,
                 v: int,
                 h: int,
                 attention_size: Optional[int] = None,
                 dropout: float = 0.3,
                 chunk_mode: str = 'chunk'):
        """Initialize the Decoder block."""
        super().__init__()

        chunk_mode_modules = {
            'chunk': MultiHeadAttentionChunk,
            'window': MultiHeadAttentionWindow,
        }

        if chunk_mode in chunk_mode_modules:
            MHA = chunk_mode_modules[chunk_mode]
        elif chunk_mode is None:
            MHA = MultiHeadAttention
        else:
            raise NameError(
                f'chunk_mode "{chunk_mode}" not understood. Must be one of {", ".join(chunk_mode_modules.keys())} or None.')

        self._selfAttention = MHA(d_model, q, v, h, attention_size=attention_size)
        self._encoderDecoderAttention = MHA(d_model, q, v, h, attention_size=attention_size)
        self._feedForward = PositionwiseFeedForward(d_model)

        self._layerNorm1 = nn.LayerNorm(d_model)
        self._layerNorm2 = nn.LayerNorm(d_model)
        self._layerNorm3 = nn.LayerNorm(d_model)
        # Norm 4 belongs to the (currently disabled) HeirarchicalMoE path.
        self._layerNorm4 = nn.LayerNorm(d_model)
        self._layerNorm5 = nn.LayerNorm(d_model)

        self._dopout = nn.Dropout(p=dropout)

        ## 20231216 MoE layer (experimental; unused in forward)
        self._moe = HeirarchicalMoE(dim=d_model, num_experts=(4, 4))
        self._feedForward2 = PositionwiseFeedForward(d_model)

        ## 20231218 SoftMoE layers (experimental; unused in forward).
        # dim was hard-coded to 512, which breaks whenever d_model != 512;
        # it now follows d_model.
        # NOTE(review): seq_len=128 must match the actual K — confirm against caller.
        self._softmoe = SoftMoE(dim=d_model,
                                seq_len=128,
                                num_experts=8
                                )
        self._dynamicslots_softmoe = DynamicSlotsSoftMoE(dim=d_model,
                                                         num_experts=4,
                                                         geglu=True
                                                         )

        ## 20231219 st_moe_pytorch SparseMoEBlock — ACTIVE in forward.
        self._st_moe = StMoE(
            dim=d_model,
            num_experts=8,       # more experts adds parameters, not computation
            gating_top_n=2,      # top-2 gating (3 tested in the paper with a lower threshold)
            threshold_train=0.2, # routing threshold for the 2nd expert and beyond;
                                 # 0.2 was optimal for 2-expert routing
            threshold_eval=0.2,
            capacity_factor_train=1.25,  # slack capacity in case gating is imbalanced
            capacity_factor_eval=1.25,   # capacity_factor_* should be >= 1
            balance_loss_coef=1e-2,      # weight of the expert-balancing auxiliary loss
            router_z_loss_coef=1e-3,     # weight of the router z-loss
            )

        self._moe_block = SparseMoEBlock(self._st_moe,
                                         add_ff_before=True,
                                         add_ff_after=True
                                         )

    def forward(self, x: torch.Tensor, memory: torch.Tensor) -> torch.Tensor:
        """Propagate the input through the Decoder block.

        Apply the self attention block, add residual and normalize.
        Apply the encoder-decoder attention block, add residual and normalize.
        Apply the feed forward network, the sparse MoE block, then a second
        feed forward network, each with residual and normalization.

        Parameters:
        x:
            Input tensor with shape (batch_size, K, d_model).
        memory:
            Memory tensor with shape (batch_size, K, d_model)
            from encoder output.
        Returns:
        x:
            Output tensor with shape (batch_size, K, d_model).
        """
        # Masked self attention: future positions are hidden from each query.
        residual = x
        x = self._selfAttention(query=x, key=x, value=x, mask="subsequent")
        x = self._dopout(x)
        x = self._layerNorm1(x + residual)

        # Encoder-decoder attention: keys and values come from the encoder memory.
        residual = x
        x = self._encoderDecoderAttention(query=x, key=memory, value=memory)
        x = self._dopout(x)
        x = self._layerNorm2(x + residual)

        # Feed forward layer.
        residual = x
        x = self._feedForward(x)
        x = self._dopout(x)
        x = self._layerNorm3(x + residual)

        ## 20231219 st-moe block.
        # NOTE(review): the auxiliary losses are discarded here, so the
        # balance / router-z regularizers never reach the optimizer —
        # confirm this is intentional.
        x, total_aux_loss, balance_loss, router_z_loss = self._moe_block(x)

        # Feed forward layer after the MoE block.
        residual = x
        x = self._feedForward2(x)
        x = self._dopout(x)
        x = self._layerNorm5(x + residual)

        return x



class PositionwiseFeedForward(nn.Module):
    """Position-wise Feed Forward Network block from Attention is All You Need.

    Apply two linear transformations to each position, separately but
    identically. They are implemented here with ``nn.Linear`` layers (the
    original paper describes the equivalent 1D-convolution formulation).
    Input and output have shape (batch_size, K, d_model).

    Parameters:
    d_model:
        Dimension of input tensor.
    d_ff:
        Dimension of hidden layer, default is 2048.
    """
    def __init__(self,
                 d_model: int,
                 d_ff: int = 2048):
        """Initialize the PFF block."""
        super().__init__()

        self._linear1 = nn.Linear(d_model, d_ff)
        self._linear2 = nn.Linear(d_ff, d_model)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Propagate forward the input through the PFF block.

        Apply the first linear transformation, then a ReLU activation,
        and the second linear transformation.

        Parameters:
        x:
            Input tensor with shape (batch_size, K, d_model).
        Returns:
            Output tensor with shape (batch_size, K, d_model).
        """
        return self._linear2(F.relu(self._linear1(x)))


# define Affine transformer
class Affine(nn.Module):
    """Learned per-position affine gate with a residual connection.

    Computes ``relu(alpha * x + beta) * scale + x`` where ``alpha`` is a
    per-position (K, d_model) weight and ``beta`` / ``scale`` are per-feature
    parameters. Assumes x has shape (batch, K, d_model) — TODO confirm with
    callers, since ``alpha`` broadcasts against the last two dimensions.
    """

    def __init__(self, d_model:int, K: Optional[int] = 128): # ### 96 or 192
        super().__init__()
        # Small initial magnitudes keep the gated branch near zero at start,
        # so the block initially behaves close to an identity mapping.
        self.alpha = nn.Parameter(2e-4 * torch.ones(K, d_model))
        self.beta = nn.Parameter(2e-1 * torch.ones(d_model))
        self.scale = nn.Parameter(2e-2 * torch.ones(d_model))

    def forward(self, x):
        gated = F.relu(self.alpha * x + self.beta)
        return gated * self.scale + x


class Mlp(nn.Module):
    """Two-layer feed-forward block: Linear -> ReLU -> Dropout -> Linear -> Dropout.

    Parameters:
    d_model:
        Dimension of input and output tensors.
    d_ff:
        Dimension of the hidden layer, default is 2048.
    """

    def __init__(self, d_model:int, d_ff: Optional[int] = 2048):
        super().__init__()

        self.fc1 = nn.Linear(d_model, d_ff)
        self.fc2 = nn.Linear(d_ff, d_model)
        self.drop = nn.Dropout(0.2)

    def forward(self, x):
        """Expand to d_ff, apply ReLU and dropout, then project back to d_model."""
        hidden = self.drop(F.relu(self.fc1(x)))
        return self.drop(self.fc2(hidden))


class OZELoss(nn.Module):
    r"""Custom loss for the TRNSYS metamodel.

    Computes the MSE separately over the temperature output (last channel)
    and the consumption outputs (all other channels), then combines the
    logs with a coefficient ``alpha``:

    .. math::
        \Delta_T = MSE(y_{est}^T, y^T)
        \Delta_Q = MSE(y_{est}^Q, y^Q)
        loss = \log(1 + \Delta_T) + \alpha \cdot \log(1 + \Delta_Q)

    Parameters:
    reduction:
        Reduction mode forwarded to ``nn.MSELoss`` ('mean', 'sum' or 'none').
        Default is 'mean'.
    alpha:
        Coefficient for consumption. Default is '0.3'.
    """

    def __init__(self, reduction: str = 'mean', alpha: float = 0.3):
        super().__init__()
        self.alpha = alpha
        self.reduction = reduction
        self.base_loss = nn.MSELoss(reduction=self.reduction)

    def forward(self,
                y_true: torch.Tensor,
                y_pred: torch.Tensor) -> torch.Tensor:
        """Compute the loss between a target value and a prediction.

        Parameters:
        y_true:
            Target tensor; the last channel is treated as the temperature,
            the remaining channels as the consumption.
        y_pred:
            Estimated tensor with the same shape as ``y_true``.
        Returns:
        Loss as a tensor with gradient attached — one value per sample when
        ``reduction='none'``, a scalar otherwise.
        """
        # Consumption channels (all but last) vs. temperature channel (last).
        delta_Q = self.base_loss(y_pred[..., :-1], y_true[..., :-1])
        delta_T = self.base_loss(y_pred[..., -1], y_true[..., -1])

        if self.reduction == 'none':
            # Collapse the unreduced per-element losses to one value per sample.
            delta_Q = delta_Q.mean(dim=(1, 2))
            delta_T = delta_T.mean(dim=1)

        return torch.log(1 + delta_T) + self.alpha * torch.log(1 + delta_Q)

