# models/autocorrelation.py
import paddle
import paddle.nn as nn
import paddle.fft as fft

class AutoCorrelationMechanism(nn.Layer):
    """FFT-based auto-correlation mechanism, an O(L log L) attention substitute.

    Instead of dot-product attention, the layer discovers dominant
    periodicities: the cross-correlation of Q against K is computed in
    the frequency domain (Wiener-Khinchin theorem), the ``top_k`` most
    significant time lags are selected, and V is aggregated over those
    lags with softmax-normalized correlation weights (Autoformer,
    Wu et al., NeurIPS 2021).

    Args:
        d_model: feature dimension of the input (size of the last axis).
        top_k: number of dominant lags to aggregate over.
    """

    def __init__(self, d_model, top_k=3):
        super().__init__()
        self.top_k = top_k
        self.q_proj = nn.Linear(d_model, d_model)
        self.k_proj = nn.Linear(d_model, d_model)
        self.v_proj = nn.Linear(d_model, d_model)
        self.out_proj = nn.Linear(d_model, d_model)

    def forward(self, x):
        """Apply auto-correlation to ``x``.

        Args:
            x: tensor assumed to be [batch, seq_len, d_model] — the code
               FFTs over axis 1 and projects the last axis.

        Returns:
            Tensor of the same shape as ``x``.
        """
        seq_len = x.shape[1]

        # 1. Linear projections (shapes identical to x).
        q, k, v = self.q_proj(x), self.k_proj(x), self.v_proj(x)

        # 2. Correlation via FFT: corr(tau) = IFFT(FFT(q) * conj(FFT(k))).
        #    Pass n=seq_len so the inverse transform yields exactly
        #    seq_len lags even for odd seq_len (the default would return
        #    seq_len - 1 and silently drop the last lag).
        q_fft = fft.rfft(q, axis=1)
        k_fft = fft.rfft(k, axis=1)
        corr = fft.irfft(q_fft * paddle.conj(k_fft), n=seq_len, axis=1)

        # 3. Select the top-k lags by mean correlation over batch/channels.
        _, top_lags = paddle.topk(corr.mean(axis=(0, 2)), k=self.top_k)
        lags = [int(lag) for lag in top_lags]

        # 4. Softmax-normalize the per-batch, per-channel correlations at
        #    the selected lags so the aggregation weights sum to 1 across
        #    the k lags (as in the Autoformer paper). Previously the raw
        #    correlations were averaged, so the output scale drifted with
        #    the input's energy.
        weights = paddle.stack([corr[:, lag] for lag in lags], axis=0)  # [k, B, D]
        weights = nn.functional.softmax(weights, axis=0)

        # 5. Time-delay aggregation: align v with each dominant lag
        #    (roll by -lag, matching the reference implementation) and
        #    accumulate the weighted patterns.
        out = paddle.zeros_like(v)
        for i, lag in enumerate(lags):
            shifted = paddle.roll(v, shifts=-lag, axis=1)
            # weights[i] is [B, D]; unsqueeze to [B, 1, D] to broadcast
            # over the sequence axis of shifted ([B, L, D]).
            out = out + shifted * weights[i].unsqueeze(1)

        # 6. Final output projection.
        return self.out_proj(out)
