import math
import torch
from torch import nn
from einops import rearrange
from utils import clone_module_list
from networks.common.normalization import RMSNorm
import torch.nn.functional as F

class SelfAttention(nn.Module):
    """Standard multi-head softmax self-attention with rotary position
    embeddings (RoPE) applied to queries and keys.
    """

    def __init__(self,
                 args):
        super().__init__()

        self.n_head = args.n_heads
        self.d = args.width
        self.n_outdim = args.width
        self.attn_type = args.attn_type
        self.head_dim = self.n_outdim // self.n_head

        assert self.d % self.n_head == 0, "hidden sizes should be divided by n_head"

        # key, query, value projections for all heads
        self.wk = nn.Linear(self.d, self.d)
        self.wq = nn.Linear(self.d, self.d)
        self.wv = nn.Linear(self.d, self.d)

        # output projection
        self.wo = nn.Linear(self.d, self.d)

    def _neg_half(self, x: torch.Tensor):
        # Rotate the two feature halves: (x1, x2) -> (-x2, x1).
        # $\frac{d}{2}$
        d_2 = self.d // 2
        return torch.cat([-x[..., d_2:], x[..., :d_2]], dim=-1)

    def rope(self, x: torch.Tensor, cos_sin_item: torch.Tensor):
        """Apply rotary position embedding.

        `cos_sin_item` stacks cosine rows in its first half and the matching
        sine rows in its second half along dim 0; each row must broadcast
        against `x`.
        """
        # Split the features, we can choose to apply rotary embeddings only
        # to a partial set of features (here the split point is `self.d`, so
        # `x_pass` is empty when inputs have exactly `self.d` features).
        x_rope, x_pass = x[..., :self.d], x[..., self.d:]
        neg_half_x = self._neg_half(x_rope)

        # FIX: the sine-row offset was hard-coded as `i + 3`, which is only
        # correct when cos_sin_item has exactly 6 rows. Use half the row
        # count so any even number of (cos, sin) row pairs works; for the
        # original 6-row case this is identical (6 // 2 == 3).
        half = cos_sin_item.shape[0] // 2
        x_out = torch.zeros_like(x)
        for i in range(half):
            x_out += (x_rope * cos_sin_item[i]) + (neg_half_x * cos_sin_item[i + half])

        return torch.cat((x_out, x_pass), dim=-1)

    def forward(
        self,
        x: torch.Tensor,
        cos_sin_item: torch.Tensor
    ):
        bsz, seqlen, _ = x.shape
        xq, xk, xv = self.wq(x), self.wk(x), self.wv(x)

        # Rotary embeddings on queries and keys only.
        xq = self.rope(xq, cos_sin_item)
        xk = self.rope(xk, cos_sin_item)

        # (bsz, seqlen, d) -> (bsz, n_head, seqlen, head_dim)
        xq = xq.view(bsz, seqlen, self.n_head, self.head_dim).transpose(1, 2)
        xk = xk.view(bsz, seqlen, self.n_head, self.head_dim).transpose(1, 2)
        xv = xv.view(bsz, seqlen, self.n_head, self.head_dim).transpose(1, 2)

        # Scaled dot-product attention; softmax in float32 for stability,
        # then cast back to the input dtype.
        scores = torch.matmul(xq, xk.transpose(2, 3)) / math.sqrt(self.head_dim)
        scores = F.softmax(scores.float(), dim=-1).type_as(xq)

        output = torch.matmul(scores, xv)  # (bs, n_local_heads, seqlen, head_dim)
        output = output.transpose(1, 2).contiguous().view(bsz, seqlen, -1)
        return self.wo(output)


class LinearSelfAttention(nn.Module):
    """Multi-head linear (softmax-free) self-attention with rotary position
    embeddings. `args.attn_type` selects the normalization scheme:
    'l1', 'galerkin', or 'l2'.
    """

    def __init__(self,
                 args):
        super().__init__()

        self.n_head = args.n_heads
        self.d = args.width
        self.n_outdim = args.width
        self.attn_type = args.attn_type

        assert self.d % self.n_head == 0, "hidden sizes should be divided by n_head"

        # key, query, value projections for all heads
        self.key = nn.Linear(self.d, self.d)
        self.query = nn.Linear(self.d, self.d)
        self.value = nn.Linear(self.d, self.d)

        # output projection
        self.proj = nn.Linear(self.d, self.d)

    def _neg_half(self, x: torch.Tensor):
        # Rotate the two feature halves: (x1, x2) -> (-x2, x1).
        # $\frac{d}{2}$
        d_2 = self.d // 2
        return torch.cat([-x[..., d_2:], x[..., :d_2]], dim=-1)

    def rope(self, x: torch.Tensor, cos_sin_item: torch.Tensor):
        """Apply rotary position embedding.

        `cos_sin_item` stacks cosine rows in its first half and the matching
        sine rows in its second half along dim 0; each row must broadcast
        against `x`.
        """
        # Split the features, we can choose to apply rotary embeddings only
        # to a partial set of features (here the split point is `self.d`, so
        # `x_pass` is empty when inputs have exactly `self.d` features).
        x_rope, x_pass = x[..., :self.d], x[..., self.d:]
        neg_half_x = self._neg_half(x_rope)

        # FIX: the sine-row offset was hard-coded as `i + 3`, which is only
        # correct when cos_sin_item has exactly 6 rows. Use half the row
        # count so any even number of (cos, sin) row pairs works; for the
        # original 6-row case this is identical (6 // 2 == 3).
        half = cos_sin_item.shape[0] // 2
        x_out = torch.zeros_like(x)
        for i in range(half):
            x_out += (x_rope * cos_sin_item[i]) + (neg_half_x * cos_sin_item[i + half])

        return torch.cat((x_out, x_pass), dim=-1)

    def forward(self, x: torch.Tensor, cos_sin_item: torch.Tensor):
        """Linear self attention: O(S) in sequence length since keys are
        aggregated before being combined with queries."""
        B, S1, C = x.size()

        q = self.query(x)
        k = self.key(x)
        v = self.value(x)

        q = self.rope(q, cos_sin_item)
        k = self.rope(k, cos_sin_item)

        q = q.view(B, S1, self.n_head, self.n_outdim // self.n_head).transpose(1, 2)  # (B, nh, T, hs)
        k = k.view(B, S1, self.n_head, self.n_outdim // self.n_head).transpose(1, 2)  # (B, nh, T, hs)
        v = v.view(B, S1, self.n_head, self.n_outdim // self.n_head).transpose(1, 2)  # (B, nh, T, hs)

        if self.attn_type == 'l1':
            q = q.softmax(dim=-1)
            k = k.softmax(dim=-1)
            k_cumsum = k.sum(dim=-2, keepdim=True)

            D_inv = 1. / (q * k_cumsum).sum(dim=-1, keepdim=True)        # normalized
        elif self.attn_type == "galerkin":
            q = q.softmax(dim=-1)
            k = k.softmax(dim=-1)

            D_inv = 1. / S1                                              # galerkin
        elif self.attn_type == "l2":                                     # still use l1 normalization
            q = q / q.norm(dim=-1, keepdim=True, p=1)
            k = k / k.norm(dim=-1, keepdim=True, p=1)

            k_cumsum = k.sum(dim=-2, keepdim=True)
            D_inv = 1. / (q * k_cumsum).abs().sum(dim=-1, keepdim=True)  # normalized
        else:
            raise NotImplementedError

        # Aggregate keys/values once, then mix with queries (linear attention).
        context = k.transpose(-2, -1) @ v
        y = (q @ context) * D_inv + q

        # Merge heads back: (B, nh, T, hs) -> (B, T, nh*hs).
        # (Was `rearrange(y, 'b h n d -> b n (h d)')`; native ops keep this
        # class consistent with SelfAttention and drop the einops dependency.)
        y = y.transpose(1, 2).contiguous().view(B, S1, -1)

        # output projection
        y = self.proj(y)
        return y

class SwitchFeedForward(nn.Module):
    """
    ## Routing among multiple FFNs

    Switch-Transformer-style mixture of experts: every token is routed to
    exactly one expert FFN (top-1 routing), with optional capacity-based
    token dropping.
    """

    def __init__(self, *,
                 capacity_factor: float,
                 drop_tokens: bool,
                 is_scale_prob: bool,
                 n_experts: int,
                 expert: nn.Module,
                 d_model: int):
        """
        * `capacity_factor` is the capacity of each expert as a factor relative to ideally balanced load
        * `drop_tokens` specifies whether to drop tokens if more tokens are routed to an expert than the capacity
        * `is_scale_prob` specifies whether to multiply the input to the FFN by the routing probability
        * `n_experts` is the number of experts
        * `expert` is the expert layer, a [FFN module](../feed_forward.html);
          it is cloned `n_experts` times (see `clone_module_list`, so each
          expert gets its own parameters — presumably a deep copy, confirm
          against `utils`)
        * `d_model` is the number of features in a token embedding
        """
        super().__init__()

        self.capacity_factor = capacity_factor
        self.is_scale_prob = is_scale_prob
        self.n_experts = n_experts
        self.drop_tokens = drop_tokens

        # make copies of the FFNs
        self.experts = clone_module_list(expert, n_experts)
        # Routing layer and softmax
        self.switch = nn.Linear(d_model, n_experts)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x: torch.Tensor):
        """
        * `x` is the input to the switching module with shape `[seq_len, batch_size, d_model]`
        """

        # Capture the shape to change shapes later
        seq_len, batch_size, d_model = x.shape
        # Flatten the sequence and batch dimensions
        x = x.view(-1, d_model)

        # Get routing probabilities for each of the tokens.
        # $$p_i(x) = \frac{e^{h(x)_i}}{\sum^N_j e^{h(x)_j}}$$
        # where $N$ is the number of experts `n_experts` and
        # $h(\cdot)$ is the linear transformation of token embeddings.
        route_prob = self.softmax(self.switch(x))

        # Get the maximum routing probabilities and the routes.
        # We route to the expert with highest probability
        route_prob_max, routes = torch.max(route_prob, dim=-1)

        # Get indexes of tokens going to each expert
        indexes_list = [torch.eq(routes, i).nonzero(as_tuple=True)[0] for i in range(self.n_experts)]

        # Initialize an empty tensor to store outputs
        final_output = x.new_zeros(x.shape)

        # Capacity of each expert.
        # $$\mathrm{expert\;capacity} =
        # \frac{\mathrm{tokens\;per\;batch}}{\mathrm{number\;of\;experts}}
        # \times \mathrm{capacity\;factor}$$
        capacity = int(self.capacity_factor * len(x) / self.n_experts)
        # Number of tokens routed to each expert.
        # NOTE(review): `new_tensor` makes `counts` inherit x's float dtype,
        # not an integer dtype — confirm that downstream (load-balancing
        # loss / logging) expects floats.
        counts = x.new_tensor([len(indexes_list[i]) for i in range(self.n_experts)])

        # Initialize an empty list of dropped tokens
        dropped = []
        # Only drop tokens if `drop_tokens` is `True`.
        if self.drop_tokens:
            # Drop tokens in each of the experts
            for i in range(self.n_experts):
                # Ignore if the expert is not over capacity
                if len(indexes_list[i]) <= capacity:
                    continue
                # Shuffle indexes before dropping
                indexes_list[i] = indexes_list[i][torch.randperm(len(indexes_list[i]))]
                # Collect the tokens over capacity as dropped tokens
                dropped.append(indexes_list[i][capacity:])
                # Keep only the tokens upto the capacity of the expert
                indexes_list[i] = indexes_list[i][:capacity]

        # Get outputs of the expert FFNs
        expert_output = [self.experts[i](x[indexes_list[i], :]) for i in range(self.n_experts)]

        # Assign to final output
        for i in range(self.n_experts):
            final_output[indexes_list[i], :] = expert_output[i]

        # Pass through the dropped tokens unchanged (identity path)
        if dropped:
            dropped = torch.cat(dropped)
            final_output[dropped, :] = x[dropped, :]

        if self.is_scale_prob:
            # Multiply by the expert outputs by the probabilities $y = p_i(x) E_i(x)$
            final_output = final_output * route_prob_max.view(-1, 1)
        else:
            # Don't scale the values but multiply by $\frac{p}{\hat{p}} = 1$ so that the gradients flow
            # (this is something we experimented with).
            final_output = final_output * (route_prob_max / route_prob_max.detach()).view(-1, 1)

        # Change the shape of the final output back to `[seq_len, batch_size, d_model]`
        final_output = final_output.view(seq_len, batch_size, d_model)

        # Return
        #
        # * the final output
        # * number of tokens routed to each expert
        # * sum of probabilities for each expert
        # * number of tokens dropped (`len` works for both the empty list
        #   and the concatenated index tensor)
        # * routing probabilities of the selected experts
        # * the selected expert index for each token
        #
        # These are used for the load balancing loss and logging
        return final_output, counts, route_prob.sum(0), len(dropped), route_prob_max, routes


class FeedForward(nn.Module):
    """Two-layer MLP with a SiLU non-linearity: linear -> SiLU -> linear.

    `dim` is a 3-element sequence `[in_features, hidden_features, out_features]`.
    """

    def __init__(
        self,
        dim
    ):
        super().__init__()
        self.linear1 = nn.Linear(dim[0], dim[1])
        self.linear2 = nn.Linear(dim[1], dim[2])

    def forward(self, x):
        hidden = self.linear1(x)
        activated = F.silu(hidden)
        return self.linear2(activated)


class TransformerBlock(nn.Module):
    '''
    One pre-norm transformer layer: self-attention followed by a
    switch (mixture-of-experts) feed-forward, each wrapped in a residual
    connection with optional dropout.
    '''

    def __init__(self, args) -> None:

        super().__init__()

        self.d = args.width

        # Normalization flavor: one layer before attention, one before the MoE FFN.
        if args.normalization == 'layerNorm':
            make_norm = lambda: nn.LayerNorm([args.width])
        elif args.normalization == 'RMSnorm':
            make_norm = lambda: RMSNorm(args.width, eps=args.norm_eps)
        else:
            raise NotImplementedError
        self.attention_norm = make_norm()
        self.ffn_norm = make_norm()

        # Dropout is active only when a probability was configured.
        self.dropout = args.dropout_prob is not None
        if self.dropout:
            self.attention_dropout = nn.Dropout(args.dropout_prob)
            self.ffn_dropout = nn.Dropout(args.dropout_prob)

        # Attention flavor: full softmax attention or a linear variant.
        if args.attn_type == 'normal':
            self.attention_layer = SelfAttention(args=args)
        elif args.attn_type in ('galerkin', 'l1', 'l2'):
            self.attention_layer = LinearSelfAttention(args=args)
        else:
            raise NotImplementedError

        # Mixture-of-experts feed-forward; every expert is a width -> 4*width -> width MLP.
        self.MOE_block = SwitchFeedForward(
            capacity_factor=args.moe_capacity_factor,
            drop_tokens=False,
            is_scale_prob=True,
            n_experts=args.n_experts,
            expert=FeedForward([self.d, self.d * 4, self.d]),
            d_model=args.width,
        )

    def forward(self,
        x: torch.Tensor,
        cos_sin_item: torch.Tensor,
    ):
        # Attention sub-layer: pre-norm, then residual add (dropout if enabled).
        attn = self.attention_layer(self.attention_norm(x), cos_sin_item)
        if self.dropout:
            attn = self.attention_dropout(attn)
        h = x + attn

        # MoE feed-forward sub-layer: pre-norm, then residual add.
        moe, counts, route_prob, n_dropped, route_prob_max, routes = self.MOE_block(self.ffn_norm(h))
        if self.dropout:
            moe = self.ffn_dropout(moe)
        out = h + moe

        # Routing statistics are passed through for the load-balancing loss / logging.
        return out, counts, route_prob, n_dropped, route_prob_max, routes


    
    
