from layer.layers import (
    ChannelEmbedding,
    Attention,
    RevIN,
    exists,
    Feedforward,
    series_decomp,
    moving_avg,
    TransformerEncoder,
    FreqDecomp,
    PathDataEmbedding,
    RevIN_v2,
    ChannelMaskAttention,
    GroupChannelEmbed,
)
from x_transformers import Encoder
from einops import repeat, rearrange, reduce
from einops.layers.torch import Rearrange
from torch import nn
import torch


class iTransformer(nn.Module):
    """Channel-wise (inverted) Transformer forecaster.

    Each input channel is embedded as one token of size ``dim_model``; the
    attention layers then mix information across channels rather than across
    time steps.

    Args:
        c_in: number of input channels / variates.
        dim_model: embedding dimension per channel token.
        pre_win: prediction horizon (output window length).
        look_back: input window length.
        depth: number of attention + feed-forward layers.
        drop: dropout rate inside the feed-forward blocks.
        use_reversible_instance_norm: apply RevIN before embedding and invert
            it on the prediction.
    """

    def __init__(
        self,
        c_in: int,
        dim_model: int,
        pre_win: int,
        look_back: int,
        depth: int = 3,
        drop: float = 0.3,
        use_reversible_instance_norm: bool = True,
    ):
        super().__init__()
        self.channel_embed = ChannelEmbedding(win=look_back, d_model=dim_model)
        self.reversible_instance_norm = (
            RevIN(c_in, affine=True) if use_reversible_instance_norm else None
        )

        self.channel_layers = nn.ModuleList([])
        for _ in range(depth):
            self.channel_layers.append(
                nn.ModuleList(
                    [
                        Attention(dim_in=dim_model),
                        nn.LayerNorm(dim_model),
                        Feedforward(dim_model=dim_model, drop=drop),
                        nn.LayerNorm(dim_model),
                    ]
                )
            )
        # Project each channel token from the model dimension to the horizon.
        self.projection = nn.Sequential(
            nn.Linear(dim_model, pre_win),
        )

    def encode(self, x, x_mark=None):
        """Embed, run the channel-attention stack, and project to ``pre_win``."""
        if exists(self.reversible_instance_norm):
            x, rev_fn = self.reversible_instance_norm(x)
        x = self.channel_embed(x, x_mark)
        # Post-norm residual blocks: attention, then feed-forward.
        for attn, attn_norm, ff, ff_norm in self.channel_layers:
            x = attn(x) + x
            x = attn_norm(x)
            x = ff(x) + x
            x = ff_norm(x)

        pred = self.projection(x)
        if exists(self.reversible_instance_norm):
            pred = rev_fn(pred)
        return pred

    def forward(self, x, x_mark=None):
        # BUGFIX: previously called self.encode(x, None), silently discarding
        # any time-feature marks the caller supplied.
        return self.encode(x, x_mark)


class PatchLinear(nn.Module):
    """Patch-embedding linear forecaster over a seasonal/trend decomposition.

    Both components are patch-embedded with a shared embedding, reduced to a
    single vector per channel, projected to the horizon, and summed.
    """

    def __init__(
        self,
        c_in: int,
        patch_win: int,
        stride: int,
        kernel_size: int,
        dim: int,
        look_back: int,
        pre_win: int,
    ):
        super().__init__()
        self.decomp = series_decomp(kernel_size)
        self.embed = PathDataEmbedding(
            dim=dim, patch_win=patch_win, stride=stride, need_transpose=False
        )
        # Flattened (patch, embed) features -> one embedding vector per channel.
        num_patches = (look_back + stride - patch_win) // stride + 1
        self.fc_reduce = nn.Linear(in_features=dim * num_patches, out_features=dim)
        self.project = nn.Linear(in_features=dim, out_features=pre_win)
        self.revin = RevIN(num_variates=c_in)

    def forward(self, x, x_mark=None):
        x, rev_fn = self.revin(x)
        seasonal, trend = self.decomp(x)
        seasonal = rearrange(seasonal, "b w c -> b c w")
        trend = rearrange(trend, "b w c -> b c w")
        # Patch-embed each component and flatten patches into one feature axis.
        seasonal_feat = rearrange(self.embed(seasonal), "b c p e -> b c (p e)")
        trend_feat = rearrange(self.embed(trend), "b c p e -> b c (p e)")
        seasonal_feat = self.fc_reduce(seasonal_feat)
        trend_feat = self.fc_reduce(trend_feat)
        # Forecast each component, recombine, and undo the normalization.
        out = self.project(seasonal_feat) + self.project(trend_feat)
        return rev_fn(out)


class LinearModel(nn.Module):
    """Dual-branch MLP forecaster over a seasonal/trend decomposition.

    A channel-mixing branch and a temporal MLP branch are each applied to the
    seasonal and trend components; the two branch forecasts are summed.
    Normalization uses RevIN_v2 in explicit "norm"/"denorm" mode.
    """

    def __init__(
        self,
        c_in: int,
        patch_win: int,
        stride: int,
        dim: int,
        pre_win: int,
        look_back: int,
        kernel_size: int = 25,
    ):
        super().__init__()
        self.revin = RevIN_v2(num_features=c_in)
        self.decomp = series_decomp(kernel_size)

        # NOTE(review): embed, channel_emb, fc_reduce, ffd and channeMix are
        # never used in forward(); kept so existing checkpoints still load.
        self.embed = PathDataEmbedding(
            dim=dim, patch_win=patch_win, stride=stride, need_transpose=False
        )
        self.channel_emb = nn.Linear(look_back, dim)
        self.fc_reduce = nn.Linear(dim, dim)
        self.project = nn.Linear(in_features=dim, out_features=pre_win)
        self.ffd = Feedforward(dim_model=dim, drop=0.2)
        self.channeMix = nn.Linear(c_in, c_in)

        # Channel branch: per-channel embedding with a cross-channel mix.
        self.mix_channel = nn.Sequential(
            nn.Linear(look_back, dim),
            nn.GELU(),
            Rearrange("b c e -> b e c"),
            nn.Linear(c_in, c_in),
            Rearrange("b e c -> b c e"),
            nn.GELU(),
        )

        # Temporal branch: expanding MLP over the time axis.
        self.mix_temp = nn.Sequential(
            nn.Linear(look_back, dim),
            nn.GELU(),
            nn.Linear(dim, dim * 4),
            nn.GELU(),
            nn.Linear(dim * 4, dim),
            nn.GELU(),
        )
        self.channel_ffd = Feedforward(dim_model=dim, drop=0.2)
        self.temp_ffd = Feedforward(dim_model=dim, drop=0.2)

    def forward(self, x, x_mark=None):
        # Normalize with variates on the last axis, then restore (b, c, w).
        x = rearrange(x, "b c w -> b w c")
        x = self.revin(x, "norm")
        x = rearrange(x, "b w c -> b c w ")
        seasonal, trend = self.decomp(x)
        seasonal = rearrange(seasonal, "b w c -> b c w")
        trend = rearrange(trend, "b w c -> b c w")

        channel_feat = self.mix_channel(seasonal) + self.mix_channel(trend)
        temporal_feat = self.mix_temp(seasonal) + self.mix_temp(trend)

        out = self.project(self.channel_ffd(channel_feat)) + self.project(
            self.temp_ffd(temporal_feat)
        )
        out = self.revin(rearrange(out, "b c w -> b w c"), "denorm")
        return out.permute(0, 2, 1)


class LinearModelV2(nn.Module):
    """Dual-branch forecaster: channel-mixing MLP plus a patch-based temporal
    branch, both applied to the seasonal/trend components and summed."""

    def __init__(
        self,
        c_in: int,
        patch_win: int,
        stride: int,
        dim: int,
        pre_win: int,
        look_back: int,
        kernel_size: int = 25,
    ):
        super().__init__()
        self.revin = RevIN(c_in)
        self.decomp = series_decomp(kernel_size)

        self.embed = PathDataEmbedding(
            dim=dim, patch_win=patch_win, stride=stride, need_transpose=False
        )
        # NOTE(review): channel_emb, fc_reduce, ffd and channeMix are never
        # used in forward(); kept so existing checkpoints still load.
        self.channel_emb = nn.Linear(look_back, dim)
        self.fc_reduce = nn.Linear(dim, dim)
        patch_num = (look_back + stride - patch_win) // stride + 1
        self.project = nn.Linear(in_features=dim, out_features=pre_win)
        self.ffd = Feedforward(dim_model=dim, drop=0.2)
        self.channeMix = nn.Linear(c_in, c_in)

        # Channel branch: per-channel embedding with a cross-channel mix.
        self.mix_channel = nn.Sequential(
            nn.Linear(look_back, dim),
            nn.GELU(),
            Rearrange("b c e -> b e c"),
            nn.Linear(c_in, c_in),
            Rearrange("b e c -> b c e"),
            nn.GELU(),
        )

        # Temporal branch: mixes across patches, then flattens to one vector.
        self.mix_temp = nn.Sequential(
            Rearrange("b c p e -> b c e p "),
            nn.Linear(patch_num, patch_num),
            nn.GELU(),
            Rearrange("b c e p -> b c (p e)"),
            nn.Linear(patch_num * dim, dim),
            nn.GELU(),
        )

        self.channel_ffd = Feedforward(dim_model=dim, drop=0.2)
        self.temp_ffd = Feedforward(dim_model=dim, drop=0.2)

    def forward(self, x, x_mark=None):
        x, rev_fn = self.revin(x)
        seasonal, trend = self.decomp(x)
        seasonal = rearrange(seasonal, "b w c -> b c w")
        trend = rearrange(trend, "b w c -> b c w")

        channel_feat = self.mix_channel(seasonal) + self.mix_channel(trend)
        temporal_feat = self.mix_temp(self.embed(seasonal)) + self.mix_temp(
            self.embed(trend)
        )

        out = self.project(self.channel_ffd(channel_feat)) + self.project(
            self.temp_ffd(temporal_feat)
        )
        return rev_fn(out)


class LinearModelV3(nn.Module):
    """Dual-branch forecaster with grouped-channel embeddings and a learned
    blend (``alpha``) between the channel-mixing and temporal branches."""

    def __init__(
        self,
        c_in: int,
        patch_win: int,
        stride: int,
        dim: int,
        pre_win: int,
        look_back: int,
        kernel_size: int = 25,
        channel_patch_num: int = 4,
        use_norm: bool = False,
    ):
        super().__init__()
        self.revin = RevIN(c_in)
        self.decomp = series_decomp(kernel_size, use_smooth=False)
        self.channel_patch_num = channel_patch_num
        self.c_in = c_in
        # Largest multiple of channel_patch_num not exceeding c_in.
        self.channel_num = (
            self.c_in // self.channel_patch_num
        ) * self.channel_patch_num
        group_num = 4
        ss = 2
        self.embed = PathDataEmbedding(
            dim=dim,
            look_back=look_back,
            patch_win=patch_win,
            stride=stride,
            need_transpose=False,
        )
        self.group_channel_embed = GroupChannelEmbed(
            dim=dim,
            c_in=c_in,
            look_back=look_back,
            patch_win=group_num,
            stride=ss,
            need_transpose=False,
        )

        # Learned mixing weight between the channel and temporal branches.
        self.alpha = nn.Parameter(torch.tensor(0.5))
        num_channel_group = int((c_in - group_num) / ss + 1)
        self.project = nn.Sequential(
            nn.LayerNorm(dim) if use_norm else nn.Identity(),
            nn.Linear(in_features=dim, out_features=pre_win),
        )
        patch_num = (look_back + stride - patch_win) // stride + 1

        def build_channel_mixer() -> nn.Sequential:
            # Maps grouped-channel embeddings to one embedding per channel.
            return nn.Sequential(
                Rearrange("b c e -> b e c"),
                nn.Linear(num_channel_group, num_channel_group * 2),
                nn.GELU(),
                nn.Linear(num_channel_group * 2, num_channel_group),
                nn.GELU(),
                nn.Linear(num_channel_group, c_in),
                nn.GELU(),
                Rearrange("b e c -> b c e"),
            )

        def build_temporal_mixer(gelu_before_flatten: bool) -> nn.Sequential:
            # Mixes across patches, then flattens (p, e) down to dim.
            stages = [
                Rearrange("b c p e -> b c e p "),
                nn.Linear(patch_num, patch_num * 2),
                nn.GELU(),
                nn.Linear(patch_num * 2, patch_num),
            ]
            if gelu_before_flatten:
                stages.append(nn.GELU())
            stages.extend(
                [
                    Rearrange("b c e p -> b c (p e)"),
                    nn.Linear(patch_num * dim, dim),
                    nn.GELU(),
                ]
            )
            return nn.Sequential(*stages)

        self.mix_channel = build_channel_mixer()
        self.mix_temp = build_temporal_mixer(True)
        self.mix_channel2 = build_channel_mixer()
        # NOTE(review): as in the original, mix_temp2 lacks the GELU between
        # the patch projection and the flatten that mix_temp has — confirm
        # this asymmetry is intentional.
        self.mix_temp2 = build_temporal_mixer(False)
        self.channel_ffd = Feedforward(dim_model=dim, drop=0.2)
        self.temp_ffd = Feedforward(dim_model=dim, drop=0.2)

    def forward(self, x, x_mark=None):
        x, rev_fn = self.revin(x)
        seasonal, trend = self.decomp(x)
        seasonal_cw = rearrange(seasonal, "b w c -> b c w")
        trend_cw = rearrange(trend, "b w c -> b c w")

        # Channel branch works on grouped-channel embeddings of the raw
        # (untransposed) decomposition outputs.
        channel_feat = self.mix_channel(
            self.group_channel_embed(seasonal)
        ) + self.mix_channel2(self.group_channel_embed(trend))

        # Temporal branch works on patch embeddings of the transposed layout.
        temporal_feat = self.mix_temp(self.embed(seasonal_cw)) + self.mix_temp2(
            self.embed(trend_cw)
        )

        blended = self.alpha * self.channel_ffd(channel_feat) + (
            1 - self.alpha
        ) * self.temp_ffd(temporal_feat)
        return rev_fn(self.project(blended))


class MaskFormer(nn.Module):
    """Channel-masked Transformer with a pretraining mode.

    In ``pretrain`` mode a random subset of channels is masked out of the
    attention (mask entry 0 = hidden) and the model returns both a forecast
    and a reconstruction of the input window; otherwise it returns only the
    forecast.

    Args:
        c_in: number of input channels.
        dim: embedding dimension.
        head: number of attention heads.
        drop: dropout rate.
        depth: number of attention + feed-forward layers.
        look_back: input window length.
        pre_win: prediction horizon.
        dim_head: per-head dimension.
        mask_rate: fraction of channels masked during pretraining.
    """

    def __init__(
        self,
        c_in: int,
        dim: int,
        head: int,
        drop: float,
        depth: int,
        look_back: int,
        pre_win: int,
        dim_head: int,
        mask_rate: float,
    ):
        super().__init__()
        self.embed = ChannelEmbedding(win=look_back, d_model=dim)
        self.layers = nn.ModuleList([])
        self.mask_num = int(mask_rate * c_in)
        self.c_in = c_in
        for _ in range(depth):
            self.layers.append(
                nn.ModuleList(
                    [
                        ChannelMaskAttention(
                            dim_in=dim, dim_head=dim_head, drop_out=drop, head=head
                        ),
                        nn.LayerNorm(dim),
                        Feedforward(dim_model=dim, drop=drop),
                        nn.LayerNorm(dim),
                    ]
                )
            )
        self.project = nn.Linear(dim, pre_win)
        self.reconstruct = nn.Linear(dim, look_back)
        self.revin = RevIN(c_in)

    def _encode(self, x, mask):
        """Run the masked-attention stack (post-norm residual blocks)."""
        for attn, attn_norm, ff, ff_norm in self.layers:
            x = attn(x, mask) + x
            x = attn_norm(x)
            x = ff(x) + x
            x = ff_norm(x)
        return x

    def forward(self, x, x_mark=None, mode="pretrain"):
        # x: (b, c, w)
        x, rev_fn = self.revin(x)
        if mode == "pretrain":
            masked_idx = torch.randperm(self.c_in)
            # BUGFIX: create the mask on x's device — it was previously always
            # on CPU, which fails when the model runs on GPU.
            mask = torch.ones(self.c_in, device=x.device)
            mask[masked_idx[: self.mask_num]] = 0
            x = self._encode(self.embed(x), mask)
            pred = rev_fn(self.project(x))
            recon = rev_fn(self.reconstruct(x))
            return pred, recon, mask
        x = self._encode(self.embed(x), None)
        pred = rev_fn(self.project(x))
        return pred, None, None


class FreqFormer(nn.Module):
    """Frequency-decomposition Transformer forecaster.

    The input is split (via FreqDecomp) into a low-frequency "main" component
    and a high-frequency "noise" component; both are patch-embedded, encoded
    by a shared Transformer, forecast separately, and summed.
    """

    def __init__(
        self,
        c_in: int,
        patch_win: int,
        stride: int,
        dim: int,
        pre_win: int,
        look_back: int,
        depth: int = 4,
        heads: int = 4,
        use_reversible_instance_norm: bool = True,
        kernel_size: int = 25,
        use_norm: bool = False,
    ):
        super().__init__()
        self.use_reversible_instance_norm = use_reversible_instance_norm
        # Shared Transformer encoder for both frequency components.
        self.feature_enc = Encoder(
            dim=dim,
            depth=depth,
            heads=heads,
            use_rmsnorm=True,
        )
        # Two stacked RevIN layers; both are inverted on the output.
        self.revin = RevIN(c_in, affine=True)
        self.revin2 = RevIN(c_in, affine=True)
        patch_num = (look_back + stride - patch_win) // stride + 1
        # Each head unfolds channels out of the batch axis and maps the
        # flattened (patch, embed) features to the horizon.
        self.noise_pred = nn.Sequential(
            Rearrange("(b c ) p e -> b c (p e)", c=c_in),
            nn.Linear(patch_num * dim, pre_win),
        )
        self.main_pred = nn.Sequential(
            Rearrange("(b c ) p e -> b c (p e)", c=c_in),
            nn.Linear(patch_num * dim, pre_win),
        )
        self.embed = PathDataEmbedding(
            dim=dim,
            look_back=look_back,
            patch_win=patch_win,
            stride=stride,
            need_transpose=False,
        )
        # Per-channel learnable scale; unused in forward() but kept so
        # existing checkpoints still load.
        self.channel_scale = nn.Parameter(torch.ones(c_in, 1))

    def forward(self, x, x_mark=None):
        # x: (b, c, w)
        if self.use_reversible_instance_norm:
            x, rev_fn = self.revin(x)
            x, rev_fn2 = self.revin2(x)
        main, noise = FreqDecomp(x, quantile=0.5)

        # Patch-embed each component and fold channels into the batch axis.
        main_emb = rearrange(self.embed(main), "b c p e -> (b c) p e")
        noise_emb = rearrange(self.embed(noise), "b c p e -> (b c) p e")

        main_emb = self.feature_enc(main_emb)
        noise_emb = self.feature_enc(noise_emb)

        pred = self.main_pred(main_emb) + self.noise_pred(noise_emb)
        if self.use_reversible_instance_norm:
            # Invert the norms in reverse order of application.
            pred = rev_fn2(rev_fn(pred))
        return pred


class ChannelMoe(nn.Module):
    """Mixture-of-experts skeleton over patch embeddings.

    NOTE(review): this class only constructs its submodules; no forward()
    is defined in this chunk, so it appears to be a work in progress.
    """

    def __init__(
        self,
        c_in: int,
        patch_win: int,
        stride: int,
        dim: int,
        pre_win: int,
        look_back: int,
        depth: int = 4,
        heads: int = 4,
        kernel_size: int = 25,
        use_norm: bool = False,
    ):
        super().__init__()
        # Patch embedding of the input window — presumably (b, c, w) ->
        # (b, c, p, dim) as in the other models here; TODO confirm.
        self.embed = PathDataEmbedding(
            dim=dim,
            look_back=look_back,
            patch_win=patch_win,
            stride=stride,
            need_transpose=False,
        )
        # Single-expert list for now; more experts can be appended later.
        self.expert = nn.ModuleList(
            [Encoder(dim=dim, pre_norm=False, depth=1, heads=heads)]
        )


if __name__ == "__main__":
    # Smoke test for iTransformer.
    # BUGFIX: previously hard-coded .cuda(7), which crashes on any machine
    # with fewer than 8 GPUs; fall back gracefully.
    device = torch.device("cuda:7" if torch.cuda.device_count() > 7 else "cpu")
    model = iTransformer(c_in=3, dim_model=256, pre_win=100, look_back=200).to(device)
    x = torch.randn((128, 3, 200), device=device)
    out = model(x)
    print(out.shape)
