from __future__ import absolute_import
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence
from einops import rearrange, reduce, repeat
from einops.layers.torch import Reduce, Rearrange
from timm.layers import DropPath

from .sampling import DownsampleLayer
from .utils import *


class TokenMixingLayer(nn.Module):
    """Temporal attention, optional token-mixing / channel attention, then an MLP.

    The construction-time flags decide which sub-modules exist. ``forward``
    now defaults to that same configuration; passing explicit flags still
    overrides it (backward compatible with callers that forward their own
    flags, as `MTM` does).
    """

    def __init__(self, d_model=64, r_hid=4, drop=0.2, norm_first=False,
                 use_channel_attn=False, use_mixer=True, moe=False,
                 use_swiglu=False):
        super().__init__()
        # Remember the construction-time configuration so forward() can fall
        # back to it instead of relying on callers to repeat the same flags.
        # Previously forward() had its own independent defaults, which could
        # disagree with __init__ and either raise AttributeError (sub-module
        # never built) or silently skip a configured sub-module.
        self.use_channel_attn = use_channel_attn
        self.use_mixer = use_mixer
        self.moe = moe
        self.temporal = TemporalAttn(d_model, drop, norm_first)
        if use_mixer:
            self.mixer = TokenMixingAttn(d_model, drop, norm_first, moe)
        if use_channel_attn:
            self.channel = ChannelAttn(d_model, drop, norm_first)
        if use_swiglu:
            self.mlp3 = SwiGLUMLP(d_model, r_hid, drop, norm_first)
        else:
            self.mlp3 = MLP(d_model, r_hid, drop, norm_first)

    def forward(self, x, x_mask, cls_tok, pos, pe, idx_c, pe_type='rel',
                use_channel_attn=None, use_mixer=None, moe=None):
        """Mix one resolution level.

        Args:
            x: (b, t, c, d) token features.
            x_mask: (b, t, c) boolean padding mask.
            cls_tok: (b, 1, c, d) per-channel CLS token (prepended along time).
            pos: (b, t, c) time indices; negative marks invalid positions.
            pe: positional-encoding table (relative when pe_type='rel').
            idx_c: (b, t, c) channel indices, forwarded to the mixer.
            pe_type: positional-encoding mode passed to the attention layers.
            use_channel_attn / use_mixer / moe: optional overrides; None
                (the default) means "use the construction-time setting".

        Returns:
            (tokens without CLS, CLS token, imp) where imp is the temporal
            attention map (b, c, t+1, t+1) when the mixer is disabled, or the
            mixer's importance output otherwise.
        """
        # Resolve call-time flags against the construction-time configuration.
        use_channel_attn = self.use_channel_attn if use_channel_attn is None else use_channel_attn
        use_mixer = self.use_mixer if use_mixer is None else use_mixer
        moe = self.moe if moe is None else moe

        # Prepend the CLS token along the time axis and extend the masks and
        # positions to match (slot 0 becomes the CLS slot).
        x = torch.concat([cls_tok, x], dim=1)
        x_mask = F.pad(x_mask, (0, 0, 1, 0), 'constant', False)
        p_mask = pos < 0  # invalid (padding) positions
        pos = F.pad(pos + 1, (0, 0, 1, 0), 'constant', 0)  # shift so CLS gets 0
        p_mask = F.pad(p_mask, (0, 0, 1, 0), 'constant', False)

        x, imp = self.temporal(x, x_mask, pos, pe, pe_type)  # imp: b c tq+1 tk+1
        if use_mixer:
            # From the CLS query row, pick the most-attended channel per key
            # step and let the mixer route tokens accordingly.
            imp = imp[:, :, 0, :].argmax(1)  # b tk
            x, imp = self.mixer(x, x_mask, p_mask, pos, pe, imp, idx_c,
                                pe_type, True, moe)
        if use_channel_attn:
            x = self.channel(x, x_mask)

        x = self.mlp3(x)
        # Split back into data tokens and the (still 1-long) CLS token.
        return x[:, 1:, :, :], x[:, [0], :, :], imp


class MTM(nn.Module):
    """Multi-scale token-mixing classifier.

    Pipeline: embed (signal + k-mer) -> per-channel features with absolute
    positional encoding -> input mixing layer -> repeated (downsample, mix)
    stages, one per entry in ``ratios`` -> max-pooled CLS token (optionally
    concatenated with static features) -> classification head.
    """

    def __init__(self,
                 num_chn,
                 d_static,
                 num_cls,
                 ratios,
                 d_model=96,
                 r_hid=4,
                 drop=0.2,
                 norm_first=True,
                 down_mode='concat',
                 vocab_size=16,
                 embedding_size=4,
                 use_channel_attn=False,
                 use_mixer=True,
                 moe=False,
                 use_swiglu=False,
                 **kwargs):
        super().__init__()
        self.d_model = d_model
        self.d_static = d_static
        self.ratios = ratios

        # Pre-computed positional-encoding tables; buffers so they follow the
        # module across devices without being trained.
        self.register_buffer('rpe', precompute_rpe(d_model))
        self.register_buffer('ape', precompute_ape(d_model))
        self.embedding = nn.Embedding(vocab_size, embedding_size)
        self.chn_emb = nn.Embedding(num_chn, d_model)
        nn.init.xavier_uniform_(self.chn_emb.weight)
        # One learnable CLS token per channel.
        self.cls_tok = nn.Parameter(torch.rand(num_chn, d_model))

        self.inp_layer = TokenMixingLayer(d_model, r_hid, drop, norm_first,
                                          use_channel_attn, use_mixer, moe,
                                          use_swiglu)
        # Interleaved construction (mixer then sampler per ratio) keeps the
        # parameter-initialization order identical for a fixed RNG seed.
        self.mixers = nn.ModuleList()
        self.samplers = nn.ModuleList()
        for ratio in ratios:
            self.mixers.append(
                TokenMixingLayer(d_model, r_hid, drop, norm_first,
                                 use_channel_attn, use_mixer, moe, use_swiglu))
            self.samplers.append(DownsampleLayer(d_model, ratio, down_mode))

        self.cls_head = CLSHead(d_model, d_static, num_cls, drop)

    def forward(self, signal, kmer, x_mask, t, x_static,
                use_channel_attn=False, use_mixer=True, moe=False):
        """Classify a batch of sequences.

        Args:
            signal: raw signal features, concatenated with the k-mer embedding
                along the last dimension.
            kmer: integer k-mer ids for ``self.embedding``.
            x_mask: (b, t, c) boolean padding mask.
            t: (b, t) time indices.
            x_static: static per-sample features, appended before the head
                when ``d_static > 0``.
            use_channel_attn / use_mixer / moe: flags forwarded to every
                TokenMixingLayer call.

        Returns:
            Output of the classification head.
        """
        kmer_feat = self.embedding(kmer)
        x = torch.cat([signal, kmer_feat], dim=-1)
        batch, n_time, n_chn = x_mask.shape
        n_time += 1  # account for the CLS token prepended inside the mixers
        device = x.device
        # NOTE(review): time_idx spans the original t steps while batch_idx /
        # chn_idx span t+1 — presumably DownsampleLayer expects exactly this;
        # confirm against its implementation.
        time_idx = repeat(t, "b t -> b t c", c=n_chn)
        batch_idx = repeat(torch.arange(batch, device=device),
                           "b -> b t c", t=n_time, c=n_chn)
        chn_idx = repeat(torch.arange(n_chn, device=device),
                         "c -> b t c", b=batch, t=n_time)

        # Outer-product each scalar feature with its channel embedding, then
        # add absolute positional encoding (NaNs in x are zeroed first).
        chn_feat = self.chn_emb(torch.arange(n_chn, device=device))
        x = apply_abs_pe(x.nan_to_num(0)[..., None] * chn_feat,
                         time_idx, self.ape)

        cls = repeat(self.cls_tok, "c d -> b 1 c d", b=batch)
        x, cls, imp = self.inp_layer(x, x_mask, cls, time_idx, self.rpe,
                                     chn_idx,
                                     use_channel_attn=use_channel_attn,
                                     use_mixer=use_mixer, moe=moe)

        # Coarsen and mix, one stage per downsampling ratio.
        for sampler, mixer in zip(self.samplers, self.mixers):
            x, x_mask, time_idx = sampler(x, x_mask, batch_idx, time_idx,
                                          chn_idx, imp)
            x, cls, imp = mixer(x, x_mask, cls, time_idx, self.rpe, chn_idx,
                                use_channel_attn=use_channel_attn,
                                use_mixer=use_mixer, moe=moe)

        # Max-pool the per-channel CLS tokens into one vector per sample.
        pooled = reduce(cls, "b 1 c d -> b d", 'max')
        feats = [pooled, x_static] if self.d_static > 0 else [pooled]
        return self.cls_head(torch.cat(feats, -1))
