
from typing import Any

from lightning.pytorch.utilities.types import STEP_OUTPUT
import torch
from torch import nn, optim
from torch import einsum
from einops import repeat, rearrange, reduce,pack,unpack
from einops.layers.torch import Rearrange
from torch.optim.lr_scheduler import LinearLR, StepLR, CosineAnnealingLR

from layer.embeds import DataEmbedding, ChannelEmbedding, PathDataEmbedding, GroupChannelEmbed
import lightning as L
import torch.nn.functional as F
from layer.revin import RevIN, RevIN_v2
class Residual(nn.Module):
    """Skip connection: returns ``x + fn(x)`` for the wrapped module/callable."""

    def __init__(self, fn):
        super().__init__()
        self.fn = fn

    def forward(self, x):
        out = self.fn(x)
        return out + x


def exists(v):
    """Return True iff *v* carries a value (i.e. is not None)."""
    return v is not None


def default(v, d):
    """Return *v* unless it is None, in which case fall back to *d*."""
    if v is None:
        return d
    return v


def identity(t, *args, **kwargs):
    """Pass-through: return the first argument, ignoring any extras."""
    del args, kwargs  # accepted only for call-site interface compatibility
    return t


def cast_tuple(t):
    """Wrap *t* in a 1-tuple unless it already is a tuple."""
    if isinstance(t, tuple):
        return t
    return (t,)


class moving_avg(nn.Module):
    """
    Moving average block to highlight the trend of time series.

    Takes (batch, channels, length) input and returns the pooled series as
    (batch, length', channels); for odd kernel sizes with stride 1 the edge
    padding keeps length' == length.
    """

    def __init__(self, kernel_size, stride):
        super().__init__()
        self.kernel_size = kernel_size
        self.avg = nn.AvgPool1d(kernel_size=kernel_size, stride=stride, padding=0)

    def forward(self, x):
        # replicate-pad both ends of the series so windows never run off the edge
        pad = (self.kernel_size - 1) // 2
        x = x.permute(0, 2, 1)  # (b, l, c)
        head = x[:, :1, :].repeat(1, pad, 1)
        tail = x[:, -1:, :].repeat(1, pad, 1)
        padded = torch.cat((head, x, tail), dim=1)
        # pool over the time axis, then put channels last again
        smoothed = self.avg(padded.permute(0, 2, 1))  # (b, c, l')
        return smoothed.permute(0, 2, 1)  # (b, l', c)


class series_decomp(nn.Module):
    """
    Series decomposition block.

    Splits a (batch, channels, length) series into a residual part and a
    moving-average trend, both returned as (batch, length, channels). With
    ``use_smooth=True`` the residual is smoothed once more with a width-3
    average and ``(smooth, trend, de-smoothed residual)`` is returned.
    """

    def __init__(self, kernel_size, use_smooth=False):
        super().__init__()
        self.moving_avg = moving_avg(kernel_size, stride=1)
        self.smooth = moving_avg(3, stride=1)
        self.use_smooth = use_smooth

    def forward(self, x):
        # x: (b, c, w)
        trend = self.moving_avg(x)  # (b, w, c)
        residual = x.permute(0, 2, 1) - trend

        if not self.use_smooth:
            return residual, trend

        # second, short-window pass over the residual
        smooth = self.smooth(residual.permute(0, 2, 1))
        residual = residual - smooth
        return smooth, trend, residual


class Attention(nn.Module):
    """Multi-head self-attention over (batch, seq, dim_in) inputs."""

    def __init__(self, dim_in, drop_out: float = 0.2, dim_head: int = 256, head: int = 4):
        super().__init__()
        self.dim = dim_head
        self.drop_out = drop_out
        self.head = head
        self.scale = dim_head ** -0.5
        # one projection yields q, k and v for every head at once
        self.to_qkv = nn.Linear(dim_in, head * dim_head * 3, bias=False)
        self.to_out = nn.Sequential(nn.Linear(dim_head * head, dim_in, bias=False),
                                    nn.Dropout(drop_out))

    def forward(self, x):
        # x: (b, s, dim_in)
        b, s, _ = x.shape
        qkv = self.to_qkv(x).chunk(3, dim=-1)
        # split heads: (b, s, h*d) -> (b, h, s, d)
        q, k, v = (t.view(b, s, self.head, self.dim).transpose(1, 2) for t in qkv)

        logits = torch.matmul(q, k.transpose(-2, -1)) * self.scale
        weights = logits.softmax(dim=-1)
        out = torch.matmul(weights, v)  # (b, h, s, d)
        # merge heads back: (b, h, s, d) -> (b, s, h*d)
        out = out.transpose(1, 2).reshape(b, s, self.head * self.dim)
        return self.to_out(out)


class Feedforward(nn.Module):
    """Position-wise MLP: dim -> 4*dim -> dim with GELU and dropout."""

    def __init__(self, dim_model, drop: float = 0.2):
        super().__init__()
        self.dim_model = dim_model
        self.drop = drop
        hidden = dim_model * 4
        self.ffd = nn.Sequential(
            nn.Linear(dim_model, hidden),
            nn.GELU(),
            nn.Dropout(drop),
            nn.Linear(hidden, dim_model),
        )

    def forward(self, x):
        # applied independently at every position; shape is preserved
        return self.ffd(x)




class ChannelMaskAttention(nn.Module):
    """Multi-head self-attention with an optional boolean attention mask."""

    def __init__(self, dim_in, drop_out: float = 0.2, dim_head: int = 256, head: int = 4):
        super().__init__()
        self.dim = dim_head
        self.drop_out = drop_out
        self.head = head
        self.scale = dim_head ** -0.5
        self.to_qkv = nn.Linear(dim_in, head * dim_head * 3, bias=False)
        self.to_out = nn.Sequential(nn.Linear(dim_head * head, dim_in, bias=False),
                                    nn.Dropout(drop_out))

    @property
    def device(self):
        # device of the module's parameters; used to align the mask tensor
        return next(self.parameters()).device

    def forward(self, x, mask=None):
        # x: (b, s, dim_in); mask broadcasts against (b, h, s, s), False = blocked
        b, s, _ = x.shape
        qkv = self.to_qkv(x).chunk(3, dim=-1)
        q, k, v = (t.view(b, s, self.head, self.dim).transpose(1, 2) for t in qkv)

        logits = torch.matmul(q, k.transpose(-2, -1)) * self.scale
        if mask is not None:
            # blocked positions get a large negative logit before the softmax
            logits = logits.masked_fill(~mask.bool().to(self.device), -1e9)
        weights = logits.softmax(dim=-1)
        out = torch.matmul(weights, v)  # (b, h, s, d)
        out = out.transpose(1, 2).reshape(b, s, self.head * self.dim)
        return self.to_out(out)


class PatchDropout(nn.Module):
    """
    Randomly keep a subset of patches per sample during training.

    Input is (b, c, n, e); a per-sample random subset of the n patches is
    retained (always at least one), shared across channels. Acts as the
    identity in eval mode or when prob == 0.
    """

    def __init__(self, prob):
        super().__init__()
        assert 0 <= prob < 1.
        self.prob = prob

    def forward(self, x):
        # no-op outside training or when dropout is disabled
        if not self.training or self.prob == 0.:
            return x

        b, c, n, _ = x.shape
        device = x.device

        keep = max(1, int(n * (1 - self.prob)))
        # random per-sample ranking; top-k indices pick the surviving patches
        keep_idx = torch.randn(b, n, device=device).topk(keep, dim=-1).indices
        row = torch.arange(b, device=device).unsqueeze(-1)  # (b, 1), broadcasts with keep_idx
        # gather along the patch axis for every channel at once
        per_channel = x.permute(1, 0, 2, 3)  # (c, b, n, e)
        kept = per_channel[:, row, keep_idx, :]  # (c, b, keep, e)
        return kept.permute(1, 0, 2, 3)  # (b, c, keep, e)



class TransformerEncoder(nn.Module):
    """Stack of post-norm transformer layers (self-attention + feed-forward)."""

    def __init__(self, depth, dim, heads):
        super().__init__()
        self.depth = depth
        self.dim = dim
        self.heads = heads

        # each entry: [attention, its norm, feed-forward, its norm]
        self.layers = nn.ModuleList(
            nn.ModuleList([
                Attention(dim_in=dim, head=heads),
                nn.LayerNorm(dim),
                Feedforward(dim_model=dim),
                nn.LayerNorm(dim),
            ])
            for _ in range(self.depth)
        )

    def forward(self, x):
        # post-norm residual blocks: norm(x + sublayer(x))
        for attn, attn_norm, ff, ff_norm in self.layers:
            x = attn_norm(x + attn(x))
            x = ff_norm(x + ff(x))
        return x



def FreqDecomp(x, quantile=0.01):
    """
    Split a signal into a dominant-frequency part and a residual "noise" part.

    The real FFT of *x* is taken over the last dimension, frequency bins whose
    magnitude falls at or below the per-signal *quantile* threshold are zeroed,
    and the result is inverted back to the time domain.

    Args:
        x: real tensor of shape (..., length); decomposed along the last axis.
        quantile: fraction in [0, 1] of the weakest frequency bins to discard.

    Returns:
        (ori, noise): ``ori`` is the filtered signal and ``noise = x - ori``;
        both have the same shape as ``x``.
    """
    freq_x = torch.fft.rfft(x, dim=-1)
    freq_x_abs = freq_x.abs()
    thresh = torch.quantile(freq_x_abs, quantile, dim=-1, keepdim=True)
    ori = torch.where(freq_x_abs <= thresh, torch.tensor(0. + 0.j, device=x.device), freq_x)
    # Pass n explicitly: irfft's default output length is 2*(bins-1), which
    # drops a sample for odd-length inputs and would break the subtraction below.
    ori = torch.fft.irfft(ori, n=x.shape[-1]).float()
    noise = x - ori
    return ori, noise


# class AttentionFFDMOE(nn.Module):
#     def __init__(self,dim,head,depth,num_ffd=4):
#         super().__init__()
#         self.layers = nn.ModuleList([])
#         self.
#         for _ in range(depth):
#             self.layers.append(
#                 nn.ModuleList([
#                 Attention(dim_in=dim,head=head),
#                 nn.LayerNorm(dim),
#                 nn.ModuleList([Feedforward(dim_model=dim) for _ in range(num_ffd)]),
#                 nn.LayerNorm(dim)])
#             )

#     def forward(self,x):
#         for attn,attn_norm,ffd_expert,ffd_norm in self.layers:
#             x = attn(x) + x 
#             x = attn_norm(x)

        


if __name__ == '__main__':
    # NOTE: Encoder is imported but unused here; kept so the script still
    # exercises the x_transformers dependency when run directly.
    from x_transformers import Encoder

    # smoke test: decompose a random batch and show both parts of one series
    ori, noise = FreqDecomp(torch.randn(3, 7, 96))
    print(ori[0, 0])
    print(noise[0, 0])
