#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# Copyright 2019 Shigeki Karita
#  Apache 2.0  (http://www.apache.org/licenses/LICENSE-2.0)
"""Positionwise feed forward layer definition."""

import random
import torch
import torch.distributed as dist

from typing import Union
from wenet.models.moe_comformer.epoch_based_activator import simple_factory, EpochBasedNumber
from wenet.models.moe_comformer.moe_train_utils import accum_loss, register_update_function





class PositionwiseFeedForward(torch.nn.Module):
    """Position-wise feed forward layer.

    The same two-layer MLP is applied independently at every position of
    the sequence; the output dimension equals the input dimension.

    Args:
        idim (int): Input dimension.
        hidden_units (int): The number of hidden units.
        dropout_rate (float): Dropout rate applied after the activation.
        activation (torch.nn.Module): Activation function.
        bias (bool): Whether the linear projections use a bias term.
    """

    def __init__(self,
                 idim: int,
                 hidden_units: int,
                 dropout_rate: float,
                 activation: torch.nn.Module = torch.nn.ReLU(),
                 bias: bool = True):
        """Construct a PositionwiseFeedForward object."""
        super(PositionwiseFeedForward, self).__init__()
        self.w_1 = torch.nn.Linear(idim, hidden_units, bias=bias)
        self.activation = activation
        self.dropout = torch.nn.Dropout(dropout_rate)
        self.w_2 = torch.nn.Linear(hidden_units, idim, bias=bias)

    def reset_parameters(self):
        """Re-initialize both linear projections."""
        for layer in (self.w_1, self.w_2):
            layer.reset_parameters()

    def forward(self, xs: torch.Tensor) -> torch.Tensor:
        """Project up, apply activation and dropout, then project back.

        Args:
            xs: input tensor (B, L, D)
        Returns:
            output tensor, (B, L, D)
        """
        hidden = self.dropout(self.activation(self.w_1(xs)))
        return self.w_2(hidden)


class LoadStat(object):
    """Accumulates per-expert selection counts to report load ratios."""

    def __init__(self, n):
        # One int counter per expert, plus the number of frames observed.
        self.stat = torch.zeros(n, dtype=torch.int32)
        self.frames = 0

    def update(self, t, selected):
        """Count each expert's occurrences in `selected`; add `t` frames."""
        self.stat = self.stat.to(selected.device)
        counts = torch.bincount(selected.flatten(), minlength=len(self.stat))
        self.stat += counts
        self.frames += t

    def get(self):
        """Return per-expert selection ratios ([] if nothing was seen)."""
        if self.frames <= 0:
            return []
        if dist.is_available() and dist.is_initialized():
            # Cross-rank reduction is intentionally disabled; stats are
            # reported per rank.
            #dist.all_reduce(self.stat)
            pass
        return (self.stat / self.frames).tolist()

    def reset(self):
        """Clear all counters for the next reporting interval."""
        self.stat.zero_()
        self.frames = 0


class LoadBalancedRouter(torch.nn.Module):
    """Top-k router over `n` experts with optional load balancing.

    Balancing mechanisms (all optional, training-time only):
      * an auxiliary balance loss, weighted by `auxiliary_balance_factor`
        and accumulated through `accum_loss`;
      * a per-expert routing bias adapted by `update()` outside the
        gradient path (the bias shifts routing only, never the logits
        returned to the caller);
      * uniformly random expert selection with probability `random_prob`
        during the first `random_epochs` epochs.
    Evaluation always uses the plain (unbiased) top-k router.

    Args:
        gate_module: module mapping (T, D) inputs to (T, n) router scores.
        n: number of routable experts.
        auxiliary_balance_factor: weight of the auxiliary loss, scheduled
            per epoch via `EpochBasedNumber`.
        bias_update_speed: additive step for the routing bias; a final
            value of 0 disables the biased router and its bookkeeping.
        overload_coef: multiple of the uniform share 1/n above which an
            expert counts as overloaded.
        underload_coef: multiple of 1/n below which an expert counts as
            underloaded.
        random_epochs: number of initial epochs eligible for random routing.
        random_prob: per-call probability of random routing in those epochs.
        **kwargs: absorbed and ignored (tolerates unused config keys).
    """

    def __init__(self, gate_module, n: int, auxiliary_balance_factor=0,
                 bias_update_speed=0, overload_coef=2, underload_coef=0.5,
                 random_epochs=0, random_prob=0, **kwargs):
        super(LoadBalancedRouter, self).__init__()
        self.gate = gate_module
        self.auxiliary_balance_factor = simple_factory(
            EpochBasedNumber, auxiliary_balance_factor)
        self.bias_update_speed = simple_factory(
            EpochBasedNumber, bias_update_speed)
        if self.bias_update_speed.final_val > 0:
            # The bias is updated manually in update(); keep it out of
            # autograd (and thus out of optimizer steps).
            self.bias = torch.nn.Parameter(torch.zeros(n), requires_grad=False)
            self.stat = torch.zeros(n, dtype=torch.int32)
            self.overload_ratio = overload_coef / n
            self.underload_ratio = underload_coef / n
            register_update_function(self.update)
        self.random_epochs = random_epochs
        self.random_prob = random_prob
        self.random = random_epochs > 0 and random_prob > 0
        self.load_stat = LoadStat(n)

    def extra_repr(self):
        """Describe the routing bias in repr() output.

        Fix: return '' (not None) when there is no bias --
        nn.Module.extra_repr() is documented to return a string; the
        previous None only worked via a truthiness check in torch.
        """
        if hasattr(self, 'bias'):
            return (f'(bias): Parameter(len={len(self.bias)},'
                    f' overload_ratio={self.overload_ratio:.3f},'
                    f' underload_ratio={self.underload_ratio:.3f})')
        return ''

    def set_epoch(self, epoch):
        """Advance the epoch-based schedules; re-evaluate random routing."""
        self.auxiliary_balance_factor.set_epoch(epoch)
        self.bias_update_speed.set_epoch(epoch)
        self.random = epoch < self.random_epochs and self.random_prob > 0

    def get_debug_stat(self, infos: list, name=''):
        """Append '{name}.load_ratio=[...]' to `infos`, then reset stats.

        Each entry shows the expert's load share and, when the biased
        router is active, its current bias value.
        """
        pieces = []
        pieces.append([f'{r:.2%}' for r in self.load_stat.get()])
        if self.bias_update_speed.value() > 0:
            pieces.append([f'{round(b, 4)}' for b in self.bias.data.tolist()])
        if len(pieces) == 1:
            load_ratio = pieces[0]
        else:
            load_ratio = [f'({", ".join(p)})' for p in zip(*pieces)]
        load_ratio = ', '.join(f'{i}: {l}' for i, l in enumerate(load_ratio))
        infos.append(f'{name}.load_ratio=[{load_ratio}]')
        self.load_stat.reset()

    def forward(self, xs: torch.Tensor, k: int):
        """Route each token to k experts.

        Args:
            xs: flattened input tokens (T, D), T = B*L.
            k: number of experts to select per token.
        Returns:
            logits: gate scores of the chosen experts, (T, k).
            selected: indices of the chosen experts, (T, k).
        """
        router = self.gate(xs)
        T, N = router.size()
        if self.training:
            if self.auxiliary_balance_factor.value() > 0:
                accum_loss(f'router{N}_balance',
                           self.load_balancing_loss(router, k))
            if self.random and random.random() < self.random_prob:
                # Random router: a uniform k-subset of experts per token.
                selected = torch.tensor(
                    [random.sample(range(N), k) for i in range(T)],
                    device=router.device)
                logits = router.gather(1, selected)  # (B*L, k)
            elif self.bias_update_speed.value() > 0:
                # Biased router.
                self.stat = self.stat.to(router.device)
                # bias is only used for routing and does not affect logits.
                _, selected = torch.topk(router + self.bias, k)
                self.stat += torch.bincount(selected.flatten(), minlength=N)
                logits = router.gather(1, selected)  # (B*L, k)
            else:
                # Naive router.
                logits, selected = torch.topk(router, k)
        else:
            # Always naive router during evaluation.
            logits, selected = torch.topk(router, k)
        self.load_stat.update(T, selected)
        return logits, selected

    def update(self):
        """Adapt the routing bias from selection counts since last call.

        Registered via register_update_function(); runs outside autograd.
        """
        if self.bias_update_speed.value() > 0:
            if dist.is_available() and dist.is_initialized():
                # Balance against the global load, not one rank's view.
                dist.all_reduce(self.stat)
            ratio = self.stat / self.stat.sum(dim=-1, keepdim=True)
            # penalize overload
            self.bias += torch.where(ratio > self.overload_ratio,
                                     -self.bias_update_speed.value(), 0)
            # compensate underload
            self.bias += torch.where(ratio < self.underload_ratio,
                                     self.bias_update_speed.value(), 0)
            # Re-center only on large drift; routing depends on bias
            # differences, so the mean itself does not change decisions.
            if abs(self.bias.mean()).item() > 0.5:
                self.bias -= self.bias.mean()
            self.stat.zero_()

    def load_balancing_loss(self, router, k):
        """Auxiliary balance loss: per-expert selection frequency times
        mean routing probability, summed and rescaled by N / (T * k)."""
        T, N = router.size()
        _, selected = torch.topk(router, k)
        prob = (router / router.sum(dim=-1, keepdim=True)).mean(0)  # (N,)
        uniq, freq = torch.unique(selected.flatten(), return_counts=True)
        factor = self.auxiliary_balance_factor.value() * N / T / k
        return sum(freq[i] * prob[ex] for i, ex in enumerate(uniq)) * factor


class DeepseekV3MoE(torch.nn.Module):
    """Mixture-of-experts feed-forward layer with shared experts.

    `n_expert` may be a list of group sizes: each group gets its own
    router over the first `g - n_expert_shared` non-shared experts, and
    one group is sampled uniformly per forward call, while the expert
    modules themselves are shared across all groups.  The first
    `n_expert_shared` experts are always active for every token.

    Args:
        idim (int): Input/output dimension.
        hidden_units (int): Hidden units of each expert FFN.
        dropout_rate (float): Dropout rate inside each expert.
        activation (torch.nn.Module): Activation for the experts.
        bias (bool): Bias flag for the experts' linear layers.
        n_expert: Total expert count, or a list of per-group counts.
        n_expert_activated (int): Experts used per token, shared included.
        n_expert_shared (int): Number of always-active shared experts.
        **kwargs: Forwarded to LoadBalancedRouter.
    """

    def __init__(
        self,
        idim: int,
        hidden_units: int,
        dropout_rate: float,
        activation: torch.nn.Module = torch.nn.ReLU(),
        bias: bool = False,
        n_expert: Union[int, list] = 16,
        n_expert_activated: int = 4,
        n_expert_shared: int = 1,
        **kwargs,
    ):
        super(DeepseekV3MoE, self).__init__()
        groups = n_expert if isinstance(n_expert, list) else [n_expert]
        n_expert = max(groups)
        # k routed experts per token; s shared experts always active.
        self.k = n_expert_activated - n_expert_shared
        self.s = n_expert_shared
        self.routers = torch.nn.ModuleList(
            LoadBalancedRouter(
                torch.nn.Sequential(
                    torch.nn.Linear(idim, g - self.s, bias=False),
                    torch.nn.Sigmoid(),
                ),
                g - self.s,
                **kwargs,
            ) for g in groups
        )
        self.experts = torch.nn.ModuleList(
            PositionwiseFeedForward(
                idim, hidden_units, dropout_rate, activation, bias=bias)
            for _ in range(n_expert)
        )

    def set_epoch(self, epoch):
        """Propagate the epoch to every router's schedules."""
        for router in self.routers:
            router.set_epoch(epoch)

    def get_debug_stat(self, infos: list, name=''):
        """Collect load-ratio debug strings from every router."""
        for i, router in enumerate(self.routers):
            router.get_debug_stat(infos, f'{name}.router{i}')

    def forward(self, xs: torch.Tensor) -> torch.Tensor:
        """Forward function.
        Args:
            xs: input tensor (B, L, D)
        Returns:
            output tensor, (B, L, D)
        """
        if torch.onnx.is_in_onnx_export():
            return self.forward_onnx2(xs)
        B, L, D = xs.size()  # batch size, sequence length, embedding dimension (idim)
        xs = xs.view(-1, D)  # (B*L, D)
        logits, selected_experts = random.choice(self.routers)(xs, self.k)
        weights = logits / logits.sum(dim=-1, keepdim=True)  # (B*L, k)
        # Weighted-sum.
        output = torch.zeros_like(xs)  # (B*L, D)
        for expert in self.experts[:self.s]:
            # Shared experts.
            output += expert(xs)
        for i, expert in enumerate(self.experts[self.s:]):
            # Activated non-shared experts.
            token_ids, ith_expert = torch.where(selected_experts == i)
            output[token_ids] += weights[token_ids, ith_expert, None] * \
                                 expert(xs[token_ids])
        return output.view(B, L, D)

    def forward_onnx2(self, xs: torch.Tensor) -> torch.Tensor:
        """ONNX-export variant of forward().

        Appends zero-valued "fake" tokens whose routing covers every
        non-shared expert at least twice, presumably so no expert sees an
        empty token set while tracing -- TODO confirm against the export
        pipeline.  The fake rows are dropped before reshaping.
        """
        B, L, D = xs.size()  # batch size, sequence length, embedding dimension (idim)
        xs = xs.view(-1, D)  # (B*L, D)
        logits, selected_experts = random.choice(self.routers)(xs, self.k)
        weights = logits / logits.sum(dim=-1, keepdim=True)  # (B*L, k)
        num_non_shared = len(self.experts) - self.s
        len_fake = (num_non_shared * 2 + self.k - 1) // self.k
        # Fix: match xs.dtype so torch.cat works under mixed precision.
        pad = torch.zeros(len_fake, D, device=xs.device, dtype=xs.dtype)
        xs = torch.cat((xs, pad))  # (B*L + f, D)
        exp_fake = len_fake * self.k
        fake_selected = (torch.arange(exp_fake, device=xs.device) %
                         num_non_shared).reshape(-1, self.k)  # (f, k)
        selected_experts = torch.cat((selected_experts, fake_selected))  # (B*L + f, k)
        fake_weights = torch.ones(
            len_fake, self.k, device=xs.device,
            dtype=weights.dtype) / self.k  # (f, k)
        weights = torch.cat((weights, fake_weights))  # (B*L + f, k)
        # Weighted-sum.
        output = torch.zeros_like(xs)  # (B*L + f, D)
        for expert in self.experts[:self.s]:
            # Shared experts.
            output += expert(xs)
        for i, expert in enumerate(self.experts[self.s:]):
            # Activated non-shared experts.
            token_ids, ith_expert = torch.where(selected_experts == i)
            output[token_ids] += weights[token_ids, ith_expert, None] * \
                                 expert(xs[token_ids])
        # Drop the fake tokens before restoring the (B, L, D) shape.
        return output[: B * L].view(B, L, D)


def init_ff(idim: int, hidden_units: int, dropout_rate: float,
            activation: torch.nn.Module = torch.nn.ReLU(), bias: bool = False,
            moe_conf: Union[dict, None] = None):
    """Build a feed-forward layer: a plain FFN or a DeepSeek-V3 MoE.

    Args:
        idim: input/output dimension.
        hidden_units: hidden dimension.
        dropout_rate: dropout rate.
        activation: activation module.
        bias: bias flag for the MoE experts; deliberately NOT forwarded
            to the plain FFN (see comment below).
        moe_conf: MoE configuration dict; falsy selects the plain FFN.
    Returns:
        A PositionwiseFeedForward or DeepseekV3MoE module.
    """
    if not moe_conf:
        # `bias` intentionally omitted: keep the plain FFN's default bias
        # for compatibility with existing checkpoints.
        return PositionwiseFeedForward(idim, hidden_units, dropout_rate,
                                       activation)
    return DeepseekV3MoE(idim, hidden_units, dropout_rate, activation, bias,
                         **moe_conf)
