import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np

# Spectral similarity helper
def spectral_similarity(a, b):
    """Mean cosine similarity between the magnitude spectra of ``a`` and ``b``.

    Both inputs are FFT'd along dim=1 (the time axis); the similarity is taken
    over the last dimension of the magnitude spectra and averaged.
    """
    mag_a = torch.fft.rfft(a, dim=1).abs()
    mag_b = torch.fft.rfft(b, dim=1).abs()
    return F.cosine_similarity(mag_a, mag_b, dim=-1).mean()

# Dynamic patch-merging helper
def dynamic_merge(patches, threshold=0.73428):
    """Greedily average runs of consecutive spectrally-similar patches.

    Walks the patch list left to right, keeping a running sum. When the next
    patch's spectral similarity to the accumulator falls below ``threshold``,
    the accumulated mean is emitted and a new group starts.

    Args:
        patches: list of same-shaped tensors (may be empty).
        threshold: cosine-similarity cut-off below which a new group begins.

    Returns:
        List of averaged patch tensors, one per merged group; ``[]`` for empty
        input (the original raised IndexError here).
    """
    if not patches:
        return []
    merged = []
    acc = patches[0]
    count = 1
    for patch in patches[1:]:
        # Cosine similarity of magnitude spectra is scale-invariant and rfft
        # is linear, so comparing against the running SUM is equivalent to
        # comparing against the running mean.
        if spectral_similarity(acc, patch) < threshold:
            merged.append(acc / count)
            acc = patch
            count = 1
        else:
            acc = acc + patch
            count += 1
    merged.append(acc / count)
    return merged

# Local window attention
class LocalWindowAttention(nn.Module):
    """Self-attention restricted to non-overlapping windows along the sequence.

    The sequence is right-padded to a multiple of ``window_size``, attention is
    computed independently inside each window, and the padding is stripped from
    the output.
    """

    def __init__(self, d_model, window_size):
        super().__init__()
        self.window_size = window_size
        self.query = nn.Linear(d_model, d_model)
        self.key = nn.Linear(d_model, d_model)
        self.value = nn.Linear(d_model, d_model)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x):
        """x: [B, L, C] -> [B, L, C]."""
        batch, seq_len, dim = x.shape

        # Right-pad so the sequence splits evenly into windows.
        remainder = seq_len % self.window_size
        if remainder:
            x = F.pad(x, (0, 0, 0, self.window_size - remainder))

        # [B, num_windows, window_size, C]
        windows = x.view(batch, -1, self.window_size, dim)

        q = self.query(windows)
        k = self.key(windows)
        v = self.value(windows)

        # Scaled dot-product attention within each window.
        scores = torch.matmul(q, k.transpose(-2, -1)) / (dim ** 0.5)
        attended = torch.matmul(self.softmax(scores), v)

        # Flatten windows back to a sequence and drop the padding.
        return attended.reshape(batch, -1, dim)[:, :seq_len, :]

# Frequency-domain sparse attention
class FrequencySparseAttention(nn.Module):
    """Global attention over the top-k highest-magnitude frequency components.

    The input is moved to the frequency domain, attention is applied only to
    the k strongest bins (by mean magnitude across batch and channels), and the
    result is scattered back and inverted to the time domain.
    """

    def __init__(self, d_model, topk=10):
        super().__init__()
        self.topk = topk
        self.d_model = d_model
        self.query = nn.Linear(d_model, d_model)
        self.key = nn.Linear(d_model, d_model)
        # NOTE(review): `value` is kept for state-dict compatibility but is not
        # used in forward — values are taken directly from the complex spectrum.
        self.value = nn.Linear(d_model, d_model)

    def forward(self, x):
        """x: [B, L, C] -> [B, L, C]."""
        B, L, C = x.shape
        x_freq = torch.fft.rfft(x, dim=1)  # [B, L//2 + 1, C]

        # Rank frequency bins by mean magnitude. Clamp k so short sequences
        # (fewer than `topk` rfft bins) do not crash torch.topk.
        magnitudes = torch.abs(x_freq).mean(dim=(0, 2))
        k_bins = min(self.topk, magnitudes.numel())
        _, indices = torch.topk(magnitudes, k_bins)
        sparse_freq = x_freq[:, indices, :]

        # Attention weights come from the real part only; the values stay
        # complex so phase information survives the weighted sum.
        q = self.query(sparse_freq.real)
        k = self.key(sparse_freq.real)
        v = sparse_freq

        attn = F.softmax(torch.matmul(q, k.transpose(-2, -1)) / (C ** 0.5), dim=-1)
        # Promote the real weights to complex for the complex-valued matmul.
        out = torch.matmul(torch.complex(attn, torch.zeros_like(attn)), v)

        # Scatter attended components back into a full spectrum and invert.
        full_freq = torch.zeros_like(x_freq)
        full_freq[:, indices, :] = out
        return torch.fft.irfft(full_freq, n=L, dim=1)

# Adaptive multi-scale partitioning
class AdaptiveMultiScalePartition(nn.Module):
    """Partition a sequence at several scales and merge similar neighbours.

    Splits the input into patches at up to three dyadic scales
    (min_scale, 2*min_scale, 4*min_scale) and runs ``dynamic_merge`` on each
    scale's patch list, collecting all merged patches into one list.
    """

    def __init__(self, min_scale=4, merge_threshold=0.73428):
        super().__init__()
        self.min_scale = min_scale
        self.merge_threshold = merge_threshold

    def forward(self, x):
        """x: [B, L, C] -> list of merged patch tensors of shape [B, scale, C]."""
        batch, length, channels = x.shape
        collected = []
        for exponent in range(3):
            scale = self.min_scale * (2 ** exponent)
            # Skip scales larger than the sequence itself.
            if scale > length:
                continue
            count = length // scale
            if count == 0:
                continue
            # Trailing remainder (< scale) is dropped.
            usable = x[:, :count * scale].view(batch, count, scale, channels)
            segments = [usable[:, idx] for idx in range(count)]
            collected.extend(dynamic_merge(segments, self.merge_threshold))
        return collected

# Trend-stream decoder
class LowPassFilterDecoder(nn.Module):
    """Trend-stream decoder: replicate-padded Conv1d smoothing, then a GRU."""

    def __init__(self, d_model):
        super().__init__()
        self.conv = nn.Conv1d(d_model, d_model, kernel_size=3, padding=1, padding_mode='replicate')
        self.gru = nn.GRU(d_model, d_model, batch_first=True)

    def forward(self, x):
        """x: [B, L, C] -> [B, L, C]."""
        # Conv1d expects channels first: [B, L, C] -> [B, C, L] and back.
        smoothed = self.conv(x.transpose(1, 2)).transpose(1, 2)
        outputs, _ = self.gru(smoothed)
        return outputs

# Residual-stream decoder
class SparseTransformerDecoder(nn.Module):
    """Residual-stream decoder: one self-attention layer with a post-norm residual."""

    def __init__(self, d_model, n_heads):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, n_heads)
        self.norm = nn.LayerNorm(d_model)

    def forward(self, x):
        """x: [B, L, C] -> [B, L, C]."""
        # nn.MultiheadAttention defaults to sequence-first layout: [L, B, C].
        seq_first = x.transpose(0, 1)
        attended, _ = self.self_attn(seq_first, seq_first, seq_first)
        normed = self.norm(attended + seq_first)
        return normed.transpose(0, 1)

# Main model architecture
class DMSFTransformer(nn.Module):
    """Dynamic multi-scale frequency transformer.

    Pipeline: hybrid local/global attention fused by a learned gate ->
    adaptive multi-scale patch partitioning -> pooled patch summary expanded
    back to ``seq_len`` with a residual connection -> dual-stream (trend +
    residual) decoding -> output projection back to ``d_model``.

    Args:
        d_model: feature dimension of the input/output.
        n_heads: attention heads for the residual-stream decoder.
        seq_len: fixed sequence length the model restores to.
        window_size: window size for local attention (default 24).
        topk: number of frequency bins kept by the global attention
            (default 10; previously hard-coded).
    """

    def __init__(self, d_model, n_heads, seq_len, window_size=24, topk=10):
        super().__init__()
        # Hybrid attention: local windowed + global frequency-sparse.
        self.local_attn = LocalWindowAttention(d_model, window_size)
        self.global_attn = FrequencySparseAttention(d_model, topk=topk)
        self.fusion_gate = nn.Sequential(
            nn.Linear(2 * d_model, d_model),
            nn.Sigmoid()
        )

        # Dynamic multi-scale partitioning.
        # NOTE(review): 0.75 here differs from AdaptiveMultiScalePartition's
        # default of 0.73428 — confirm which threshold is intended.
        self.amsp = AdaptiveMultiScalePartition(min_scale=4, merge_threshold=0.75)

        # Dual-stream decoders.
        self.trend_stream = LowPassFilterDecoder(d_model)
        self.residual_stream = SparseTransformerDecoder(d_model, n_heads)

        # Output projection and sequence-length restoration.
        self.proj = nn.Linear(2 * d_model, d_model)
        self.restoreDim = nn.Linear(1, seq_len)

    def forward(self, x):
        """x: [B, seq_len, d_model] -> [B, seq_len, d_model]."""
        # Hybrid attention with a learned per-feature fusion gate.
        local_feat = self.local_attn(x)    # [B, L, C]
        global_feat = self.global_attn(x)  # [B, L, C]
        gate = self.fusion_gate(torch.cat([local_feat, global_feat], dim=-1))
        fused = gate * local_feat + (1 - gate) * global_feat

        # Multi-scale partitioning; each patch is summarised to [B, 1, C].
        patches = self.amsp(fused)
        processed = [self._process_patch(p) for p in patches]

        # Pool the variable number of patch summaries to [B, C, 1], expand
        # back to seq_len via a linear map, and add the residual input.
        combined = torch.cat(processed, dim=1).permute(0, 2, 1).mean(dim=-1, keepdim=True)
        combined = self.restoreDim(combined).permute(0, 2, 1) + x

        # Dual-stream decoding, concatenated and projected back to d_model.
        trend = self.trend_stream(combined)
        residual = self.residual_stream(combined)
        return self.proj(torch.cat([trend, residual], dim=-1))

    def _process_patch(self, patch):
        # Placeholder patch summariser: mean over the patch's time axis.
        return patch.mean(dim=1, keepdim=True)

if __name__ == "__main__":
    # Smoke-test configuration.
    d_model, n_heads = 512, 8
    batch_size, seq_len = 4, 256

    # Build the model and a random input batch.
    model = DMSFTransformer(d_model, n_heads, seq_len)
    x = torch.randn(batch_size, seq_len, d_model)

    # One forward pass; output shape should match the input.
    output = model(x)
    print(f"输入形状: {x.shape}")
    print(f"输出形状: {output.shape}")  # expected: [4, 256, 512]