import torch
import torch.nn as nn
import torch.nn.functional as F
from model import LinearGeneral, CAFIA_Transformer
from utils import freeze_but_type, assert_model_all_freezed, assert_all_module_type_freezed, set_freeze_all
import os

class LearnableMask(nn.Module):
    """Learnable soft channel mask.

    Holds one learnable weight per channel; the effective mask is
    ``prune_rate * dim * softmax(weights)``, so its entries always sum to
    ``prune_rate * dim`` (with all-ones init every entry starts at
    ``prune_rate``).
    """

    def __init__(self, dim, prune_rate=1.) -> None:
        super().__init__()
        self.dim = dim
        # Raw (pre-softmax) mask logits, one per channel.
        self.mask = nn.Parameter(torch.ones(dim))
        self.prune_rate = prune_rate

    def forward(self, x):
        """Scale the last dimension of ``x`` by the current mask.

        Returns the masked tensor and the (dim, dim) diagonal mask matrix.
        """
        diag_mask = torch.diag(self.get_mask())
        return torch.matmul(x, diag_mask), diag_mask

    def get_mask(self):
        """Return the effective 1-D mask values (length ``dim``)."""
        return self.prune_rate * self.dim * torch.softmax(self.mask, 0)


class MaskedSelfAttention(nn.Module):
    """Multi-head self-attention with a learnable per-head pruning mask.

    Standard scaled-dot-product attention, except that after the heads are
    computed, a ``LearnableMask`` over the head dimension softly re-weights
    (and can effectively prune) individual heads before the output
    projection.
    """

    def __init__(self, in_dim, heads=8, dropout_rate=0.1, prune_rate=.8):
        super(MaskedSelfAttention, self).__init__()
        self.heads = heads
        self.head_dim = in_dim // heads
        self.scale = self.head_dim ** 0.5
        self.prune_rate = prune_rate

        # Q/K/V projections: (in_dim,) -> (heads, head_dim).
        self.query = LinearGeneral((in_dim,), (self.heads, self.head_dim))
        self.key = LinearGeneral((in_dim,), (self.heads, self.head_dim))
        self.value = LinearGeneral((in_dim,), (self.heads, self.head_dim))
        self.out = LinearGeneral((self.heads, self.head_dim), (in_dim,))

        # Soft mask over the `heads` axis.
        self.mask = LearnableMask(self.heads, self.prune_rate)

        if dropout_rate > 0:
            self.dropout = nn.Dropout(dropout_rate)
        else:
            self.dropout = None

    def forward(self, x, need_mask=False):
        """Attend over ``x`` of shape (b, n, in_dim).

        Returns (b, n, in_dim), plus the (heads, heads) diagonal mask matrix
        when ``need_mask`` is True.
        """
        b, n, _ = x.shape

        q = self.query(x, dims=([2], [0]))  # b,n,heads,head_dim
        k = self.key(x, dims=([2], [0]))
        v = self.value(x, dims=([2], [0]))

        q = q.permute(0, 2, 1, 3)
        k = k.permute(0, 2, 1, 3)
        v = v.permute(0, 2, 1, 3)  # b,heads,n,head_dim

        attn_weights = torch.matmul(q, k.transpose(-2, -1)) / self.scale
        attn_weights = F.softmax(attn_weights, dim=-1)  # b,heads,n,n
        # BUG FIX: self.dropout was constructed but never applied; apply it to
        # the attention weights, the standard placement in ViT attention.
        # (No-op in eval mode, so inference behavior is unchanged.)
        if self.dropout is not None:
            attn_weights = self.dropout(attn_weights)
        out = torch.matmul(attn_weights, v)  # b, heads, n, head_dim
        out = out.permute(0, 2, 3, 1)  # b, n, head_dim, heads
        out, mask = self.mask(out)  # re-weight heads via learnable mask
        out = out.permute(0, 1, 3, 2)  # b, n, heads, head_dim
        out = self.out(out, dims=([2, 3], [0, 1]))

        return (out, mask) if need_mask else out

    def get_mask(self):
        """Return the 1-D per-head mask values from the LearnableMask."""
        return self.mask.get_mask()


def build_mvit(args):
    """Build a masked ViT: CAFIA_Transformer using MaskedSelfAttention.

    If ``args.cifar10_vit`` is set (optionally resolved against ``args.pwd``),
    pretrained weights are loaded from that path. Note: ``args`` is mutated
    (attn_type, vit_model, and possibly cifar10_vit).
    """
    args.attn_type = MaskedSelfAttention
    args.vit_model = None
    mvit = CAFIA_Transformer(args)
    # FIX: hasattr() alone passed when cifar10_vit existed but was None,
    # which then crashed in os.path.join / torch.load.
    if getattr(args, 'cifar10_vit', None) is not None:
        if hasattr(args, 'pwd'):
            args.cifar10_vit = os.path.join(args.pwd, args.cifar10_vit)
        mvit = load_weight_for_mvit(mvit, path=args.cifar10_vit)
        print(f"load pretrained weight from {args.cifar10_vit}.")
    else:
        print("no pretrained weight loaded cause no path is specified.")
    return mvit


def test_mmsa():
    """Smoke test: MaskedSelfAttention preserves the (b, n, dim) shape."""
    batch, seq_len, embed_dim, num_heads = 64, 32, 888, 8
    expected_shape = (batch, seq_len, embed_dim)
    attn = MaskedSelfAttention(in_dim=embed_dim, heads=num_heads)
    out, mask = attn(torch.randn(*expected_shape), need_mask=True)
    print(mask)
    assert out.shape == expected_shape, \
        f'test failed expected out dim {expected_shape}, got {out.shape}'


def test_cafia_tfm():
    """Smoke test: CAFIA_Transformer forward pass yields (b, num_classes)."""
    import json
    from argparse import Namespace

    # FIX: json.load(open(...)) leaked the file handle; use a context manager.
    with open('Vision-Transformer-ViT/ViT-B_16-224.json', 'r') as f:
        args = Namespace(**json.load(f))
    args.attn_type = MaskedSelfAttention
    args.vit_model = None

    vit = CAFIA_Transformer(args)

    b, c, h, w = (args.batch_size, 3, args.image_size, args.image_size)

    out = vit(torch.randn(b, c, h, w))
    assert out.shape == (b, args.num_classes), \
        f'test failed expected out dim {(b, args.num_classes)}, got {out.shape}'


def test_mvit_bp():
    """Smoke test: forward + cross-entropy + backward through the masked ViT."""
    import json
    from argparse import Namespace

    # FIX: json.load(open(...)) leaked the file handle; use a context manager.
    with open('Vision-Transformer-ViT/ViT-B_16-224.json', 'r') as f:
        args = Namespace(**json.load(f))
    args.attn_type = MaskedSelfAttention
    args.vit_model = None

    # FIX: hard-coded 'cuda:0' crashed on CPU-only machines; fall back to CPU.
    device = 'cuda:0' if torch.cuda.is_available() else 'cpu'

    vit = CAFIA_Transformer(args).to(device)

    b, c, h, w = (args.batch_size, 3, args.image_size, args.image_size)

    out = vit(torch.randn(b, c, h, w).to(device))
    # NOTE(review): randn targets are soft "probabilities" that can be
    # negative; F.cross_entropy accepts them, so the loss value is
    # meaningless beyond exercising backward() — presumably intentional
    # for a smoke test.
    label = torch.randn(b, args.num_classes).to(device)

    loss = F.cross_entropy(out, label)
    loss.backward()
    print(loss)


def get_masks_from_mmsa(module: nn.Module):
    """Recursively collect the raw mask Parameters of every LearnableMask.

    Returns the learnable (pre-softmax) ``mask`` Parameters, e.g. for
    building an optimizer param group over the masks only.
    """
    collected = []
    for sub in module.children():
        if type(sub) is LearnableMask:
            collected.append(sub.mask)
        else:
            collected.extend(get_masks_from_mmsa(sub))
    return collected


def get_mask_val_from_masks(module: nn.Module):
    """Recursively collect effective mask values from every LearnableMask.

    Unlike ``get_masks_from_mmsa`` this returns the post-softmax values via
    ``get_mask()``, which is what inference/pruning decisions should use.
    """
    values = []
    for sub in module.children():
        if type(sub) is LearnableMask:
            values.append(sub.get_mask())
        else:
            values.extend(get_mask_val_from_masks(sub))
    return values


def freeze_model_but_mask(model: CAFIA_Transformer):
    """Freeze all of *model*'s parameters except those inside LearnableMask
    modules, by delegating to ``utils.freeze_but_type``. Returns the result
    of that helper."""
    return freeze_but_type(model, LearnableMask)


def _test_freeze():
    """Smoke test: after freeze_model_but_mask, only LearnableMask params train."""
    import json
    from argparse import Namespace

    # FIX: json.load(open(...)) leaked the file handle; use a context manager.
    with open('Vision-Transformer-ViT/ViT-B_16-224.json', 'r') as f:
        args = Namespace(**json.load(f))
    model = build_mvit(args)

    freeze_model_but_mask(model)
    # Everything except LearnableMask must be frozen; masks must stay trainable.
    assert_model_all_freezed(model, True, LearnableMask)
    assert_all_module_type_freezed(model, LearnableMask, freeze=False)


def load_weight_for_mvit(model: 'CAFIA_Transformer', path: str) -> 'CAFIA_Transformer':
    """Load checkpoint weights from *path* into *model* and return it.

    FIX: load with ``map_location='cpu'`` so checkpoints saved on GPU can be
    read on CPU-only hosts; ``load_state_dict`` then copies the values into
    the model's existing device placement, so behavior is otherwise
    unchanged. ``strict=False`` tolerates key mismatches — presumably the
    LearnableMask parameters absent from a vanilla ViT checkpoint.
    """
    state = torch.load(path, map_location='cpu')
    model.load_state_dict(state, strict=False)
    return model


if __name__ == '__main__':
    # Manual smoke tests; uncomment the ones to run (they need the project
    # config file and, for test_mvit_bp, a CUDA device).
    # test_mmsa()
    # test_cafia_tfm()
    # test_mvit_bp()
    _test_freeze()
    pass
