from __future__ import absolute_import
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence
from einops import rearrange, reduce, repeat
from einops.layers.torch import Reduce, Rearrange
from timm.layers import DropPath

from .sampling import DownsampleLayer
from .utils import MLP, CLSHead, precompute_ape, apply_abs_pe


class TFTemporalAttn(nn.Module):
    """Self-attention along the time axis, computed independently per channel.

    Single-head attention (the head width equals ``d_model``) with a
    residual connection and post-norm (LayerNorm applied after the add).
    """

    def __init__(self, d_model, drop=0.1):
        super().__init__()

        self.wq = nn.Linear(d_model, d_model)
        self.wk = nn.Linear(d_model, d_model)
        self.wv = nn.Linear(d_model, d_model)
        self.drop = nn.Dropout(drop)
        self.layer_norm = nn.LayerNorm(d_model)
        # One head: scale scores by sqrt of the full model width.
        self.d_head = d_model

    def _attn_block(self, x, x_mask):
        # x: (batch, time, channel, dim); x_mask: (batch, time, channel),
        # True marks a padded/missing (time, channel) entry.
        bsz, nt, nc, nd = x.shape
        # Bring channels next to batch so attention runs over time within
        # each (batch, channel) pair: layout (b, c, t, d).
        q = self.wq(x).permute(0, 2, 1, 3)
        k = self.wk(x).permute(0, 2, 1, 3)
        v = self.wv(x).permute(0, 2, 1, 3)

        # Scaled dot-product scores: (b, c, tq, tk).
        scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_head)

        # Block attention both to and from padded time steps.
        pad = x_mask.transpose(1, 2)
        blocked = pad.unsqueeze(-1) | pad.unsqueeze(-2)
        scores = scores.masked_fill(blocked, float('-inf'))
        # Rows that are entirely -inf softmax to NaN; zero them out.
        weights = F.softmax(scores, -1).nan_to_num(0)

        # Weighted value sum, restored to the (b, t, c, d) input layout.
        return torch.matmul(weights, v).permute(0, 2, 1, 3)

    def forward(self, x, x_mask):
        # Post-norm residual: LayerNorm(x + Dropout(attention(x))).
        attn_out = self._attn_block(x, x_mask)

        return self.layer_norm(x + self.drop(attn_out))


class TFChannelAttn(nn.Module):
    """Self-attention across channels at each time step (pre-norm residual)."""

    def __init__(self, d_model, drop=0.1):
        super().__init__()
        self.d_head = d_model
        self.wq = nn.Linear(d_model, d_model)
        self.wk = nn.Linear(d_model, d_model)
        self.wv = nn.Linear(d_model, d_model)
        self.drop = nn.Dropout(drop)
        self.layer_norm = nn.LayerNorm(d_model)

    def _attn_block(self, x, x_mask):
        # x: (batch, time, channel, dim); attention mixes the channel axis.
        q, k, v = self.wq(x), self.wk(x), self.wv(x)
        scores = torch.einsum("btqd,btkd->btqk", q, k) / math.sqrt(self.d_head)
        # A channel pair is blocked if either side is masked at this step.
        blocked = x_mask[:, :, None, :] | x_mask[:, :, :, None]
        scores = scores.masked_fill(blocked, float('-inf'))
        # Fully-masked rows softmax to NaN; replace them with zeros.
        weights = F.softmax(scores, -1).nan_to_num(0)
        return torch.einsum("btqk,btkd->btqd", weights, v)

    def forward(self, x, x_mask, *args, **kwargs):
        # Pre-norm residual: x + Dropout(attention(LayerNorm(x))).
        normed = self.layer_norm(x)
        return x + self.drop(self._attn_block(normed, x_mask))


class TFLayer(nn.Module):
    """One encoder layer: temporal attention, channel mixing, then an MLP.

    ``c_mode`` picks the channel mixer: ``'mlp'`` uses a feed-forward
    block, anything else uses channel-wise attention.
    """

    def __init__(self, d_model=64, r_hid=4, drop=0.2, c_mode='attn'):
        super().__init__()
        self.temporal = TFTemporalAttn(d_model, drop)
        self.channel = (MLP(d_model, r_hid, drop) if c_mode == 'mlp'
                        else TFChannelAttn(d_model, drop))
        self.mlp = MLP(d_model, r_hid, drop)

    def forward(self, x, x_mask):
        # Each sub-block applies its own residual/normalization internally.
        # NOTE(review): in 'mlp' mode self.channel also receives x_mask;
        # assumes MLP.forward tolerates the extra argument — confirm.
        x = self.temporal(x, x_mask)
        x = self.channel(x, x_mask)
        return self.mlp(x)


class Transformer(nn.Module):
    """Hierarchical transformer over irregularly-sampled multichannel series.

    An input TFLayer is followed by one (downsample, mix) stage per entry in
    ``ratios``; masked global max pooling plus optional static features feed
    the classification head.
    """

    def __init__(self,
                 num_chn,
                 d_static,
                 num_cls,
                 ratios,
                 d_model=96,
                 r_hid=4,
                 drop=0.2,
                 norm_first=True,  # NOTE(review): accepted but never used here
                 down_mode='concat',
                 c_mode='attn',
                 **kwargs):
        super().__init__()
        self.d_model = d_model
        self.d_static = d_static
        self.ratios = ratios

        # Precomputed absolute positional-encoding table (non-trainable).
        self.register_buffer('ape', precompute_ape(d_model))
        # Per-channel learned embedding, used to lift scalar values to d_model.
        self.chn_emb = nn.Embedding(num_chn, d_model)
        # NOTE(review): cls_tok is a trainable parameter but is never
        # referenced in forward() — dead weight or missing CLS-prepend logic?
        self.cls_tok = nn.Parameter(torch.rand(num_chn, d_model))

        self.inp_layer = TFLayer(d_model, r_hid, drop, c_mode)
        self.mixers = nn.ModuleList()
        self.samplers = nn.ModuleList()
        # One downsample + mixer pair per pyramid stage.
        for r in ratios:
            self.mixers.append(TFLayer(d_model, r_hid, drop, c_mode))
            self.samplers.append(DownsampleLayer(d_model, r, down_mode))

        self.cls_head = CLSHead(d_model, d_static, num_cls, drop)

    def forward(self, x, x_mask, t, x_static):
        """Classify a batch of series.

        x: (batch, time, channel) observed values (NaN where missing);
        x_mask: (batch, time, channel) bool, True = missing/padded;
        t: per-sample timestamps used to index the positional table;
        x_static: (batch, d_static) static covariates (may be unused).
        """
        bsz, nt, nc = x_mask.shape
        # NOTE(review): index grids below are built for nt+1 time steps while
        # x/x_mask have nt — presumably t (and the sampler) account for an
        # extra slot; confirm against DownsampleLayer and the caller.
        nt = nt + 1
        dev = x.device
        # Broadcast time / batch / channel index grids to (b, t, c) for the
        # downsampling layers.
        idx_t = repeat(t, "b t -> b t c", c=nc)
        idx_b = repeat(torch.arange(bsz, device=dev), "b -> b t c", t=nt, c=nc)
        idx_c = repeat(torch.arange(nc, device=dev), "c -> b t c", b=bsz, t=nt)
        c_feat = self.chn_emb(torch.arange(nc, device=dev))
        # Lift scalars to d_model via the channel embedding (NaNs zeroed
        # first), then add absolute positional encodings indexed by time.
        x = apply_abs_pe(x.nan_to_num(0)[..., None] * c_feat, idx_t, self.ape)
        x = self.inp_layer(x, x_mask)

        # Coarsen the sequence stage by stage, mixing after each downsample.
        for sampler, mixer in zip(self.samplers, self.mixers):
            x, x_mask, idx_t = sampler(x, x_mask, idx_b, idx_t, idx_c, None)
            x = mixer(x, x_mask)

        outputs = []
        # Masked global max pool over all remaining (time, channel) slots:
        # masked entries are set to -inf so they never win the max.
        res = torch.masked_fill(x, x_mask[..., None], float("-inf"))
        outputs.append(res.reshape(bsz, -1, self.d_model).max(1).values)
        if self.d_static > 0:
            outputs.append(x_static)
        outputs = self.cls_head(torch.cat(outputs, -1))
        return outputs
