import numpy as np
import torch
import torch.nn as nn
from pytorch_wavelets import DWT1D, IDWT1D
import torch.nn.functional as F

class ExpertsBlock(nn.Module):
    """Gated feed-forward expert (GELU-gated, SwiGLU-style).

    Projects the last dimension (length ``int(seq_len * proj_wight)``) up to
    ``intermediate_size`` through a gated pair of linear maps and back down:
    ``down(gelu(gate(h)) * up(h))``.
    """

    def __init__(self, configs):
        super().__init__()
        self.seq_len = int(configs.seq_len * configs.proj_wight)
        self.intermediate_size = configs.intermediate_size
        # Construction order (gate -> up -> down -> norm) is kept stable so
        # seeded initialization matches checkpoints produced by this module.
        self.gate_proj = nn.Linear(self.seq_len, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.seq_len, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.seq_len, bias=False)
        self.act_fn = F.gelu
        # NOTE(review): self.norm is registered but never applied in forward().
        self.norm = nn.LayerNorm(self.seq_len, eps=1e-6)

    def forward(self, hidden_state):
        """Apply the gated MLP along the last dimension of ``hidden_state``."""
        gate = self.act_fn(self.gate_proj(hidden_state))
        mixed = gate * self.up_proj(hidden_state)
        return self.down_proj(mixed)


class WavMoE(nn.Module):
    """Wavelet Mixture-of-Experts.

    Decomposes the input with a 1-D DWT, sends per-level low-frequency
    reconstructions through shared experts and the high-frequency residual
    through a sparse top-k expert mixture, then recombines both branches.

    Input and output shape: ``(batch_size, feature_dim, seq_len)`` where
    ``seq_len == int(configs.seq_len * configs.proj_wight)`` and
    ``feature_dim == configs.enc_in``.
    """

    def __init__(self, configs):
        super().__init__()
        self.seq_len = int(configs.seq_len * configs.proj_wight)
        self.intermediate_size = configs.intermediate_size
        self.top_k = configs.top_k
        self.shared_expert = configs.shared_experts
        self.wavelet = configs.wavelet
        self.level = configs.level
        self.proj_wight = configs.proj_wight

        # Shared experts, one per decomposition level.
        # NOTE(review): forward() indexes shared_experts[i] for
        # i in range(self.level), so configs.shared_experts must equal
        # configs.level -- confirm in the config.
        self.shared_experts = nn.ModuleList(
            [ExpertsBlock(
                configs
            ) for _ in range(self.shared_expert)]
        )
        # Sparse experts for the high-frequency branch: top_k chosen out of
        # 2 * top_k candidates.
        self.moe_intermediate_size = self.intermediate_size // self.top_k
        self.experts = nn.ModuleList(
            [ExpertsBlock(
                configs
            ) for _ in range(self.top_k * 2)]
        )

        # Router over the flattened high-frequency signal.
        self.gate = nn.Linear(self.seq_len, self.top_k * 2, bias=False)
        self.norm = nn.LayerNorm(int(self.seq_len), eps=1e-6)
        self.norm2 = nn.LayerNorm(configs.enc_in, eps=1e-6)
        # DWT / IDWT with zero-padding boundary handling.
        self.dwt = DWT1D(wave=self.wavelet, J=self.level, mode='zero')
        self.idwt = IDWT1D(wave=self.wavelet, mode='zero')
        # Combines the stacked shared-expert outputs into one series.
        # NOTE(review): input dim is top_k, but the stacked axis has size
        # len(self.shared_experts); this only lines up when
        # top_k == shared_experts -- confirm.
        self.shared_weight = nn.Linear(self.top_k, 1, bias=False)
        # Per-channel scale applied to the shared (low-frequency) branch.
        self.moe_weight = torch.nn.Parameter(torch.FloatTensor([1] * configs.enc_in), requires_grad=True)

    def forward(self, x: torch.Tensor):
        """Run the wavelet MoE.

        Args:
            x: tensor of shape (batch_size, feature_dim, seq_len).

        Returns:
            Tuple ``(final_output, router_logits)``:
              * final_output -- (batch_size, feature_dim, seq_len)
              * router_logits -- (batch_size * feature_dim, top_k * 2), raw
                gate scores (e.g. for a load-balancing auxiliary loss).
        """
        batch_size, feature_dim, seq_len = x.shape
        # Wavelet decomposition: yl is the coarsest approximation, yh a list
        # of per-level detail coefficient tensors.
        yl, yh = self.dwt(x)

        # Build one low-frequency reconstruction per level by zeroing the
        # finest detail bands and inverting.
        yh_zeros = [torch.zeros_like(tensor) for tensor in yh]
        low_freq_features = []
        for i in range(self.level):
            # Keep the i coarsest detail bands, zero the rest.
            # BUG FIX: the original `yh_zeros[:-i] + yh[-i:]` degenerates at
            # i == 0 ([:-0] == [:0] is empty, [-0:] is everything), so the
            # first "low-frequency" feature was the FULL reconstruction,
            # high frequencies included. Use an explicit split index.
            split = len(yh) - i
            coeffs = (yl, yh_zeros[:split] + yh[split:])
            # Inverse DWT, trimmed back to the original length (zero-padding
            # can make the reconstruction slightly longer).
            low_freq_features.append(self.idwt(coeffs)[..., :seq_len])

        # Route each reconstruction level through its own shared expert.
        shared_outputs = [
            expert(low_freq_features[i])
            for i, expert in enumerate(self.shared_experts)
        ]
        # (B, F, S, n_shared) --weighted sum over experts--> (B, F, S),
        # LayerNorm over seq, then to (B, S, F) for the channel-wise norm2.
        shared_outputs = torch.stack(shared_outputs, dim=-1)
        shared_outputs = self.norm(self.shared_weight(shared_outputs).squeeze(-1)).permute(0, 2, 1)

        # High-frequency residual: invert with the approximation zeroed, and
        # flatten (batch, feature) into rows so each series is routed alone.
        high_freq_feature = self.idwt((torch.zeros_like(yl), yh))[..., :seq_len].reshape(batch_size * feature_dim, seq_len)
        router_logits = self.gate(high_freq_feature)
        routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float)
        routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)
        # Renormalize over the selected experts only.
        routing_weights /= routing_weights.sum(dim=-1, keepdim=True)
        routing_weights = routing_weights.to(high_freq_feature.dtype)

        final_x = torch.zeros_like(high_freq_feature)
        # (rows, top_k, n_experts) -> (n_experts, top_k, rows): for each
        # expert, which rows picked it and in which top-k slot.
        # BUG FIX: the original `.reshape(rows, 2k, top_k).permute(1, 2, 0)`
        # is NOT a transpose of the (slot, expert) axes -- it scrambled the
        # routing so rows were dispatched to the wrong experts with the wrong
        # slot weights. A direct permute keeps the axes intact.
        expert_mask = torch.nn.functional.one_hot(
            selected_experts, num_classes=self.top_k * 2
        ).permute(2, 1, 0)

        for expert_idx in range(self.top_k * 2):
            expert_layer = self.experts[expert_idx]
            # idx: top-k slot of this expert per selected row; top_x: row ids.
            idx, top_x = torch.where(expert_mask[expert_idx])
            current_state = high_freq_feature[top_x].reshape(-1, seq_len)
            # Weight each expert output by its (renormalized) routing weight.
            current_x = expert_layer(current_state) * routing_weights[top_x, idx].unsqueeze(-1)
            final_x.index_add_(0, top_x, current_x)
        # Rows are ordered (b, f), so unflatten to (B, F, S) first.
        # BUG FIX: the original `reshape(batch_size, seq_len, feature_dim)`
        # scrambled batch/feature/seq whenever feature_dim != seq_len.
        final_x = final_x.reshape(batch_size, feature_dim, seq_len).permute(0, 2, 1)

        # Merge low- and high-frequency branches, normalize across channels,
        # and return to (B, F, S).
        final_output = self.norm2(self.moe_weight * shared_outputs + final_x).permute(0, 2, 1)
        final_output = final_output.reshape(batch_size, feature_dim, seq_len)
        return final_output, router_logits



