import math
import torch.nn as nn
import torch.nn.functional as F
import torch


def process_expert(expert_model, x_expert, expert_i, top_index, y, lock):
    """Worker: run one expert on its routed rows and accumulate into the shared output.

    Args:
        expert_model: the expert module to run.
        x_expert: rows of the (duplicated) input routed to this expert, shape (..., emb).
        expert_i: this expert's index into the routing table.
        top_index: flat per-row expert assignment; rows where it equals
            ``expert_i`` receive this expert's output.
        y: shared output tensor, updated in place.
        lock: guards the in-place write so concurrent workers stay thread-safe.
    """
    expert_out = expert_model(x_expert)  # (..., emb)
    # Flat positions in y that belong to this expert.
    target_rows = (top_index == expert_i).nonzero().flatten()
    # Serialize updates to the shared tensor.
    with lock:
        y.index_add_(dim=0, index=target_rows, source=expert_out)


class Linear(nn.Module):
    """Thin wrapper around ``nn.Linear``.

    ``out_features`` falls back to ``in_features`` when omitted (or falsy),
    and bias is off by default.
    """

    def __init__(self, in_features, out_features=None, bias=False):
        super(Linear, self).__init__()
        # `or` mirrors the original truthiness-based default.
        self.fc = nn.Linear(in_features, out_features or in_features, bias=bias)

    def forward(self, x):
        return self.fc(x)


class MoELayer(nn.Module):
    """Dense (soft) mixture-of-experts.

    Every expert processes every input; the per-expert outputs are blended
    with softmax gate weights. No sparsity / top-k routing here.
    """

    def __init__(self, num_experts, in_features, out_features, experts=None):
        super(MoELayer, self).__init__()
        self.num_experts = num_experts
        if experts:
            self.experts = experts
        else:
            self.experts = nn.ModuleList(
                [Linear(in_features, out_features) for _ in range(num_experts)]
            )
        self.gate = nn.Linear(in_features, num_experts)

    def forward(self, x):
        # Gate weights over experts: (batch, num_experts).
        weights = F.softmax(self.gate(x), dim=-1)
        # Run all experts on all inputs: (batch, num_experts, out_features).
        stacked = torch.stack([expert(x) for expert in self.experts], dim=1)
        # Batched blend: (batch, 1, E) @ (batch, E, out) -> (batch, out).
        blended = torch.bmm(weights.unsqueeze(1), stacked).squeeze(1)
        return blended


class Expert(nn.Module):
    """Two-layer MLP expert (Linear -> ReLU -> Linear) preserving the embedding width."""

    def __init__(self, emb_size):
        super().__init__()
        layers = [
            nn.Linear(emb_size, emb_size),
            nn.ReLU(),
            nn.Linear(emb_size, emb_size),
        ]
        # Kept under the name `seq` so state_dict keys stay stable.
        self.seq = nn.Sequential(*layers)

    def forward(self, x):
        return self.seq(x)


class MyMoELayer(nn.Module):
    """Sparse (top-k) Mixture-of-Experts layer with Noisy Top-K Gating.

    Each input row is routed to its ``top_k`` highest-scoring experts and the
    expert outputs are blended with renormalized gate weights. During training,
    the gate scores are perturbed with learned noise and an importance loss is
    returned to keep expert utilization balanced
    (https://arxiv.org/pdf/1701.06538).

    - ref: mnist-onnx-runtime
    """

    def __init__(self, num_experts, in_features, top_k, w_importance=0.01, expert_cls=None):
        """
        Args:
            num_experts: number of expert networks.
            in_features: embedding width of the input rows.
            top_k: how many experts each row is routed to.
            w_importance: weight of the expert-balancing importance loss.
            expert_cls: callable ``cls(in_features) -> nn.Module``; defaults to ``Expert``.
        """
        super().__init__()
        self.num_experts = num_experts
        self.in_features = in_features
        self.top_k = top_k
        self.w_importance = w_importance

        expert_cls = expert_cls if expert_cls else Expert
        self.expert_cls = expert_cls
        self.experts = nn.ModuleList([expert_cls(in_features) for _ in range(num_experts)])
        self.gate = nn.Linear(in_features, num_experts)

        self.top = top_k  # alias kept for backward compatibility; forward() reads it
        self.noise = nn.Linear(in_features, num_experts)  # learned scale for gate noise

    def expert_forward(self, expert_model, x_expert):
        """Run a single expert on its routed slice of rows."""
        return expert_model(x_expert)

    def forward(self, x):
        """Route each row of x to its top-k experts and return the weighted mix.

        Args:
            x: assumed 2-D ``(batch*seq_len, in_features)`` — the expand/reshape
               logic below relies on that (TODO confirm at call sites).

        Returns:
            Tuple ``(y, gate_prob, importance_loss)`` where
              - ``y`` has the same shape as ``x`` (mixed expert output),
              - ``gate_prob`` is the per-row gate score ``(rows, num_experts)``
                (returned for debugging routing balance; noisy during training),
              - ``importance_loss`` is the balancing penalty while training,
                ``None`` in eval mode.
        """
        x_shape = x.shape
        gate_prob = F.softmax(self.gate(x), dim=-1)

        # Noisy Top-K Gating: StandardNormal() * Softplus(x @ W_noise), training only.
        # NOTE(review): the paper injects noise into the pre-softmax logits; here it
        # is added to the softmax output, so gate_prob can leave the probability
        # simplex during training — kept as-is to preserve existing behavior.
        if self.training:
            noise = torch.randn_like(gate_prob) * F.softplus(self.noise(x))
            gate_prob = gate_prob + noise

        # Pick the top-k experts per row and renormalize their weights.
        top_weights, top_index = torch.topk(gate_prob, k=self.top, dim=-1)  # both: (rows, top)
        top_weights = F.softmax(top_weights, dim=-1)

        top_weights = top_weights.view(-1)  # (rows*top,)
        top_index = top_index.view(-1)      # (rows*top,)

        # Duplicate each row once per selected expert: (rows*top, emb).
        x = x.unsqueeze(1).expand(x.size(0), self.top, x.size(-1)).reshape(-1, x.size(-1))
        y = torch.zeros_like(x)  # (rows*top, emb)

        # Run each expert on the rows routed to it and scatter results back.
        # (Dead multiprocessing/threading variants removed; the executor branch also
        # indexed by future position instead of expert id, which was incorrect.)
        for expert_i, expert_model in enumerate(self.experts):
            x_expert = x[top_index == expert_i]  # (n_i, emb)
            y_expert = expert_model(x_expert)    # (n_i, emb)
            add_index = (top_index == expert_i).nonzero().flatten()
            # Equivalent to y[top_index == expert_i] = y_expert; index_add is used
            # to keep the autograd graph intact.
            y = y.index_add(dim=0, index=add_index, source=y_expert)

        # Weighted sum over each row's top-k expert outputs.
        top_weights = top_weights.view(-1, 1).expand(-1, x.size(-1))  # (rows*top, emb)
        y = y * top_weights
        y = y.view(-1, self.top, x.size(-1))  # (rows, top, emb)
        y = y.sum(dim=1)                      # (rows, emb)

        # Balancing-expert-utilization loss (same paper): penalize a high squared
        # coefficient of variation (std/mean) of per-expert accumulated gate scores.
        if self.training:
            importance = gate_prob.sum(dim=0)  # (num_experts,)
            importance_loss = self.w_importance * (torch.std(importance) / torch.mean(importance)) ** 2
        else:
            importance_loss = None
        return y.view(x_shape), gate_prob, importance_loss


class MyMoELayerClassification(nn.Module):
    """Sigmoid head over the first element of an MoE-style output tuple.

    Expects ``input_data`` shaped like ``(y, gate_prob, importance_loss)``;
    only ``y`` is scored, the rest is ignored.
    """

    def __init__(self):
        super().__init__()
        self.sigmoid = nn.Sigmoid()

    def forward(self, input_data: tuple):
        logits, *_ = input_data
        return self.sigmoid(logits)


class MyMoEClassifier(nn.Module):
    """Binary classifier: linear embedding -> sparse MoE -> linear head -> sigmoid."""

    def __init__(self, input_size, experts, top, emb_size, output_size, w_importance=0.01, expert_cls=None):
        super().__init__()
        self.input_size = input_size
        self.emb = nn.Linear(input_size, emb_size)
        self.moe = MyMoELayer(num_experts=experts, top_k=top, in_features=emb_size,
                              w_importance=w_importance, expert_cls=expert_cls)
        self.cls = nn.Linear(emb_size, output_size)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Flatten leading dims to (rows, input_size) before embedding.
        rows = x.view(-1, self.input_size)
        embedded = self.emb(rows)  # (rows, emb_size)
        moe_out, gate_prob, importance_loss = self.moe(embedded)
        probs = self.sigmoid(self.cls(moe_out))
        return probs, gate_prob, importance_loss

    def get_loss(self, x, y_true, criterion=None, is_output=False):
        """Classification loss plus the MoE importance-balancing loss.

        Args:
            x: raw inputs, or an already-computed ``forward()`` tuple when
               ``is_output`` is True.
            y_true: target tensor; predictions are reshaped to match it.
            criterion: loss function; defaults to ``F.binary_cross_entropy``.
            is_output: treat ``x`` as a forward() output tuple.
        """
        output = x if is_output else self.forward(x)
        y_pred, gate_prob, importance_loss = output

        loss_fn = criterion if criterion else F.binary_cross_entropy
        y_pred = y_pred.view(y_true.shape)  # align prediction shape with targets

        loss = loss_fn(y_pred, y_true)
        if importance_loss is not None:
            loss = loss + importance_loss
        return loss


if __name__ == '__main__':
    # Smoke test for MyMoEClassifier.
    # - MoE tutorial & parallelism overview: https://zhuanlan.zhihu.com/p/653270049
    # - MoE parallel implementation: https://github.com/databricks/megablocks
    input_size = 2
    output_size = 1
    num_experts = 3
    top_k = 2
    emb_size = 10
    batch_size = 10

    model = MyMoEClassifier(input_size=input_size, experts=num_experts, top=top_k,
                            emb_size=emb_size, output_size=output_size)
    demo = torch.randn(batch_size, input_size)
    # BUG FIX: randint(0, 1, ...) can only produce zeros (high bound is exclusive);
    # binary labels need randint(0, 2, ...).
    y_true = torch.randint(0, 2, (batch_size, output_size)).float()

    y_pred, gate_prob, importance_loss = model(demo)
    # Exercise the loss path that y_true was built for.
    loss = model.get_loss((y_pred, gate_prob, importance_loss), y_true, is_output=True)

    y_pred = y_pred.detach().numpy()
    print(demo.shape, '->', y_pred.shape)
    # Use ndarray.sum() rather than builtin sum() (which iterates rows).
    print(y_pred.shape, '--- sum:', y_pred.sum(), '--- importance_loss:', importance_loss)
    print('--- loss:', loss.item())
