import torch 
from torch import nn 
import torch.nn.functional as F
from einops import repeat,rearrange,reduce
from einops.layers.torch import Rearrange, Reduce
from .attend import Attend
from torch.nn import Module, ModuleList
import torch
from torch import nn, einsum, Tensor
from torch.nn import Module, ModuleList
import torch.nn.functional as F

from typing import Optional, Union, Tuple

from einops import rearrange, reduce, repeat, pack, unpack
from einops.layers.torch import Rearrange

from .RevIN import RevIN

# helper functions

def exists(v):
    """Return True when *v* holds a value, i.e. is not None."""
    if v is None:
        return False
    return True

def default(v, d):
    """Return *v* unless it is None, in which case fall back to *d*."""
    if exists(v):
        return v
    return d

def identity(t, *args, **kwargs):
    """Pass-through: return *t* unchanged, silently ignoring extra arguments."""
    del args, kwargs  # accepted only so this can stand in for any callable
    return t

def cast_tuple(t):
    """Wrap *t* in a 1-tuple unless it already is a tuple."""
    if isinstance(t, tuple):
        return t
    return (t,)

# attention

class Attention(Module):
    """Multi-head attention whose output is gated by a SiLU projection of the input.

    Queries, keys and values come from one fused linear projection; the attended
    values are multiplied element-wise by a learned per-head gate before the
    final output projection. The actual attention math lives in `Attend`.
    """

    def __init__(
        self,
        dim,
        dim_head = 32,
        heads = 4,
        dropout = 0.,
        flash = True
    ):
        super().__init__()
        self.scale = dim_head ** -0.5
        inner_dim = heads * dim_head

        # fused qkv projection; leading axis of the output indexes q / k / v
        self.to_qkv = nn.Sequential(
            nn.Linear(dim, inner_dim * 3, bias = False),
            Rearrange('b n (qkv h d) -> qkv b h n d', qkv = 3, h = heads)
        )

        # per-head SiLU gate computed directly from the input tokens
        self.to_v_gates = nn.Sequential(
            nn.Linear(dim, inner_dim, bias = False),
            nn.SiLU(),
            Rearrange('b n (h d) -> b h n d', h = heads)
        )

        self.attend = Attend(flash = flash, dropout = dropout)

        # merge heads, then project back to the model dimension
        self.to_out = nn.Sequential(
            Rearrange('b h n d -> b n (h d)'),
            nn.Linear(inner_dim, dim, bias = False),
            nn.Dropout(dropout)
        )

    def forward(self, x):
        queries, keys, values = self.to_qkv(x)

        attended = self.attend(queries, keys, values)

        # gate the attended values before merging heads and projecting out
        gated = attended * self.to_v_gates(x)
        return self.to_out(gated)

# feedforward

class GEGLU(Module):
    """Gated GELU activation (GLU variant from https://arxiv.org/abs/2002.05202).

    Splits the last dimension in half: the first half is the value, the second
    half is passed through GELU and used as a multiplicative gate. The last
    dimension of the input must therefore be even.
    """

    def forward(self, x):
        # Tensor.chunk(2, dim=-1) gives the same (value, gate) split as the
        # original einops rearrange '... (r d) -> r ... d' with r = 2, without
        # the third-party dependency or an extra reshape.
        out, gate = x.chunk(2, dim = -1)
        return out * F.gelu(gate)

def FeedForward(dim, mult = 4, dropout = 0.):
    """Build a GEGLU feedforward block mapping dim -> dim.

    The hidden width is scaled by 2/3 to keep the parameter count comparable
    to a plain (non-gated) feedforward of width dim * mult, since GEGLU
    consumes a doubled projection.
    """
    hidden = int(dim * mult * 2 / 3)
    layers = [
        nn.Linear(dim, hidden * 2),
        GEGLU(),
        nn.Dropout(dropout),
        nn.Linear(hidden, dim),
    ]
    return nn.Sequential(*layers)

# main class

class ChannelTransformer(Module):
    """Transformer that attends across variates (channels) instead of time.

    Each variate's entire lookback window is embedded into
    ``num_tokens_per_variate`` tokens by an input MLP; optional learned memory
    tokens are prepended; a stack of post-norm attention + feedforward blocks
    then mixes information across the variate tokens. ``forward`` returns the
    token embeddings — no prediction head is applied here.
    """

    def __init__(
        self,
        *,
        num_variates: int,
        lookback_len: int,
        depth: int,
        dim: int,
        num_tokens_per_variate: int = 1,
        dim_head: int = 32,
        heads: int = 4,
        attn_dropout: float = 0.,
        ff_mult: int = 4,
        ff_dropout: float = 0.,
        num_mem_tokens: int = 4,
        use_reversible_instance_norm: bool = False,
        flash_attn: bool = True
    ):
        super().__init__()
        self.num_variates = num_variates
        self.lookback_len = lookback_len

        # learned memory tokens prepended to the token sequence; disabled when num_mem_tokens == 0
        self.mem_tokens = nn.Parameter(torch.randn(num_mem_tokens, dim)) if num_mem_tokens > 0 else None


        # reversible instance normalization over the per-variate lookback windows
        self.reversible_instance_norm = RevIN(num_variates) if use_reversible_instance_norm else None

        self.layers = ModuleList([])
        for _ in range(depth):
            # post-norm transformer block: x = norm(attn(x) + x); x = norm(ff(x) + x)
            self.layers.append(ModuleList([
                Attention(dim, dim_head = dim_head, heads = heads, dropout = attn_dropout, flash = flash_attn),
                nn.LayerNorm(dim),
                FeedForward(dim, mult = ff_mult, dropout = ff_dropout),
                nn.LayerNorm(dim)
            ]))

        # embeds each variate's full lookback window into num_tokens_per_variate tokens of size dim
        self.mlp_in = nn.Sequential(
            nn.Linear(lookback_len, dim * num_tokens_per_variate),
            Rearrange('b v (n d) -> b (v n) d', n = num_tokens_per_variate),
            nn.LayerNorm(dim)
        )


    def forward(
        self,
        x: Tensor,
        targets: Optional[Union[Tensor, Tuple[Tensor, ...]]] = None
    ):
        """
        einstein notation

        b - batch
        n - time
        v - variate

        Args:
            x: input of shape (batch, lookback_len, num_variates)
            targets: accepted for interface compatibility but unused in this method

        Returns:
            token embeddings of shape
            (batch, num_variates * num_tokens_per_variate, dim)
        """
        has_mem = exists(self.mem_tokens)
        # NOTE: `assert` is stripped under `python -O`; this is input validation only
        assert x.shape[1:] == (self.lookback_len, self.num_variates),f"x shape {x.shape} {self.lookback_len=} ,{self.num_variates=}"

        # the crux of the paper is basically treating variates as the spatial dimension in attention
        # there is a lot of opportunity to improve on this, if the paper is successfully replicated

        x = rearrange(x, 'b n v -> b v n')

        if exists(self.reversible_instance_norm):
            x, reverse_fn = self.reversible_instance_norm(x)

        x = self.mlp_in(x)

        # memory tokens

        if has_mem:
            m = repeat(self.mem_tokens, 'm d -> b m d', b = x.shape[0])
            x, mem_ps = pack([m, x], 'b * d')

        # attention and feedforward layers

        for attn, attn_post_norm, ff, ff_post_norm in self.layers:
            x = attn(x) + x
            x = attn_post_norm(x)
            x = ff(x) + x
            x = ff_post_norm(x)

        # splice out memory tokens

        if has_mem:
            _, x = unpack(x, mem_ps, 'b * d')

        # reversible instance normalization, if needed
        # NOTE(review): reverse_fn was created on input of shape (b, v, n) but is
        # applied here to token embeddings of shape (b, v * num_tokens_per_variate, dim)
        # — confirm RevIN's reverse broadcasting supports this, especially when
        # num_tokens_per_variate > 1

        if exists(self.reversible_instance_norm):
            x = reverse_fn(x)

        return x

    
class FeatureDistance(Module):
    """Total pairwise Euclidean distance between variate embeddings, per batch element."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # x: (batch, num_variates, embedding_dim)
        pairwise = torch.cdist(x, x)  # (batch, v, v), symmetric with zero diagonal
        # every distance appears twice in the symmetric matrix, hence the halving
        return pairwise.sum(dim=(1, 2)) / 2
    


class DistanceJudger(nn.Module):
    """Small MLP that maps a (batch, num_var, dim_in) tensor to one sigmoid score per sample."""

    def __init__(self, dim_in, num_var) -> None:
        super().__init__()
        hidden = dim_in * 2
        self.net = nn.Sequential(
            nn.Linear(dim_in, hidden),   # per-variate expansion on the last dim
            nn.ReLU(),
            nn.Flatten(),                # collapse variates: (b, num_var * hidden)
            nn.Linear(hidden * num_var, hidden),
            nn.ReLU(),
            nn.Linear(hidden, 1),
            nn.Sigmoid(),                # score in (0, 1)
        )

    def forward(self, x):
        # x: (batch, num_var, dim_in) -> (batch, 1)
        return self.net(x)
    

class DistanceLoss(nn.Module):
    """Thin wrapper around binary cross-entropy.

    Expects predictions already squashed to [0, 1] (e.g. by DistanceJudger's
    sigmoid) and binary targets of the same shape.
    """

    def __init__(self):
        super().__init__()
        self.criterion = nn.BCELoss()

    def forward(self, x, y):
        # x: predicted probabilities; y: binary targets
        return self.criterion(x, y)
    




import random

class Destroyer:
    """Corrupt a fraction of a 1-D tensor's entries with additive Gaussian noise.

    NOTE: mutates the input tensor in place and also returns it.
    """

    def __init__(self, size_fraction: float):
        # fraction of elements to corrupt, expected in [0, 1]
        self.size_fraction = size_fraction

    def noise(self, x):
        length = x.size(0)
        count = int(length * self.size_fraction)
        picks = random.sample(range(length), count)
        mean = torch.mean(x)
        std = torch.std(x)
        # one independent draw per corrupted position, at 4x the data's spread
        for i in picks:
            x[i] += torch.normal(mean, 4 * std)
        return x


         

if __name__ == "__main__":
    # Smoke test: per-row sums of the pairwise distance matrices between the
    # 3 variates of each batch element. (Stale commented-out experiments removed.)
    x = torch.randn(32, 3, 90)

    print(torch.cdist(x, x).sum(dim=1))

