import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
from torch import einsum, nn


# helpers
class FeedForward(nn.Module):
    """Position-wise MLP: Linear -> GELU -> Dropout -> Linear -> Dropout.

    Args:
        dim: input/output feature size (shape is preserved).
        hidden_dim: width of the hidden layer.
        dropout: dropout probability applied after each Linear.
    """

    def __init__(self, dim, hidden_dim, dropout = 0.):
        super().__init__()
        layers = [
            nn.Linear(dim, hidden_dim),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, dim),
            nn.Dropout(dropout),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        """Apply the MLP token-wise."""
        return self.net(x)


class PreNorm(nn.Module):
    """Run `fn` on a LayerNorm-ed input (pre-norm residual convention)."""

    def __init__(self, dim, fn):
        super().__init__()
        self.norm = nn.LayerNorm(dim)
        self.fn = fn

    def forward(self, x, **kwargs):
        normed = self.norm(x)
        return self.fn(normed, **kwargs)


def pair(t):
    """Return *t* unchanged if it is already a tuple, else duplicate it as (t, t)."""
    if isinstance(t, tuple):
        return t
    return (t, t)


# classes
class Attention(nn.Module):
    """Multi-head self-attention with a fused QKV projection.

    Args:
        dim: token embedding size.
        heads: number of attention heads.
        dim_head: per-head feature size.
        dropout: dropout probability on the output projection.
    """

    def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.):
        super().__init__()
        inner_dim = dim_head * heads
        # A single head whose width already equals `dim` needs no output
        # projection; it collapses to an identity in that case.
        needs_projection = not (heads == 1 and dim_head == dim)

        self.heads = heads
        self.scale = dim_head ** -0.5

        self.attend = nn.Softmax(dim = -1)
        self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)

        if needs_projection:
            self.to_out = nn.Sequential(
                nn.Linear(inner_dim, dim),
                nn.Dropout(dropout),
            )
        else:
            self.to_out = nn.Identity()

    def forward(self, x):
        b, n, _ = x.shape
        h = self.heads
        # Split the fused projection into per-head q, k, v:
        # (b, n, h*d) -> (b, h, n, d), equivalent to the einops rearrange.
        qkv = self.to_qkv(x).chunk(3, dim = -1)
        q, k, v = (t.reshape(b, n, h, -1).transpose(1, 2) for t in qkv)

        # Scaled dot-product attention scores: (b, h, n, n).
        scores = torch.matmul(q, k.transpose(-1, -2)) * self.scale
        weights = self.attend(scores)

        out = torch.matmul(weights, v)
        # Merge heads back: (b, h, n, d) -> (b, n, h*d).
        out = out.transpose(1, 2).reshape(b, n, -1)
        return self.to_out(out)


class Transformer(nn.Module):
    """A stack of `depth` pre-norm attention + feed-forward layers, each with a residual connection."""

    def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout = 0.):
        super().__init__()
        self.layers = nn.ModuleList(
            nn.ModuleList([
                PreNorm(dim, Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout)),
                PreNorm(dim, FeedForward(dim, mlp_dim, dropout = dropout)),
            ])
            for _ in range(depth)
        )

    def forward(self, x):
        for attention, feed_forward in self.layers:
            x = x + attention(x)
            x = x + feed_forward(x)
        return x



def pixel_upsample(x, H, W, upscale_factor=3):
    """Spatially upsample a token sequence via pixel shuffle.

    Args:
        x: tensor of shape (B, N, C) with N == H * W.
        H, W: current spatial resolution of the token grid.
        upscale_factor: pixel-shuffle factor r; C must be divisible by r**2.
            Defaults to 3, preserving the original hard-coded behavior.

    Returns:
        (x, H, W): tokens of shape (B, H*r * W*r, C // r**2) and the new
        spatial resolution.
    """
    B, N, C = x.size()
    assert N == H * W
    # (B, N, C) -> (B, C, H, W) so pixel_shuffle can trade channels for
    # resolution; reshape (not view) is safe on the non-contiguous permute.
    x = x.permute(0, 2, 1).reshape(B, C, H, W)
    # Functional form avoids constructing a throwaway nn.PixelShuffle module
    # on every call.
    x = F.pixel_shuffle(x, upscale_factor)
    B, C, H, W = x.size()
    # Back to the (B, N, C) token layout.
    x = x.reshape(B, C, H * W).permute(0, 2, 1)
    return x, H, W


class Generator(nn.Module):
    """Transformer-based generator (TransGAN style).

    Maps a latent vector plus a one-hot label to a (B, 60, 9, 9) feature map,
    growing a 1x1 token grid to 9x9 through two pixel-shuffle upsample stages.
    """

    def __init__(self, embed_dim=384, depth=5, num_heads=2, drop_path_rate=0.):
        super(Generator, self).__init__()
        # Projects the 2-dim one-hot class label to a 10-dim conditioning vector.
        self.fc2 = nn.Linear(2, 10)
        self.ch = embed_dim
        self.bottom_width = 1
        # NOTE(review): embed_dim is overridden here so all three stages divide
        # evenly by 9 and 81 — the constructor argument is effectively ignored.
        self.embed_dim = embed_dim = 4860
        # Latent (40) + label embedding (10) = 50 -> initial token grid.
        self.l1 = nn.Linear(50, (self.bottom_width ** 2) * self.embed_dim)
        # Learned positional embeddings, one per resolution stage.
        # (bottom_width**2 == bottom_width**3 == 1 here; **2 matches the token count.)
        self.pos_embed_1 = nn.Parameter(torch.randn(1, self.bottom_width ** 2, embed_dim))
        self.pos_embed_2 = nn.Parameter(torch.randn(1, self.bottom_width * 9, embed_dim // 9))
        self.pos_embed_3 = nn.Parameter(torch.randn(1, self.bottom_width * 81, embed_dim // 81))

        # Plain list for per-stage lookup; the parameters themselves are
        # registered via the attributes above (a list is not moved by .to()).
        self.pos_embed = [
            self.pos_embed_1,
            self.pos_embed_2,
            self.pos_embed_3,
        ]
        self.blocks = Transformer(dim=embed_dim, dim_head=64, depth=3,
                                  heads=num_heads, mlp_dim=embed_dim // 4)
        self.upsample_blocks = nn.ModuleList([
            Transformer(dim=embed_dim // 9, dim_head=64, depth=3,
                        heads=num_heads, mlp_dim=embed_dim // 4),
            Transformer(dim=embed_dim // 81, dim_head=64, depth=3,
                        heads=num_heads, mlp_dim=embed_dim // 4),
        ])

        # NOTE(review): to_rgb is never used in forward(); kept so existing
        # checkpoints/optimizer state still match this module's parameters.
        self.to_rgb = nn.Sequential(
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.Tanh()
        )

    def forward(self, z, onehot_label):
        """Generate a (B, 60, 9, 9) feature map from `z` (B, 40) and `onehot_label` (B, 2)."""
        # torch.sigmoid replaces the deprecated F.sigmoid.
        y_ = torch.sigmoid(self.fc2(onehot_label))
        z = torch.cat([z, y_], 1)
        x = self.l1(z).view(-1, self.bottom_width ** 2, self.embed_dim)
        # .to(x.device) also works for CPU tensors, unlike x.get_device()
        # which returns -1 (and made .to() fail) off-GPU.
        x = x + self.pos_embed[0].to(x.device)
        H, W = self.bottom_width, self.bottom_width
        x = self.blocks(x)
        for index, blk in enumerate(self.upsample_blocks):
            # Trade channels for resolution (x3 per stage), then refine.
            x, H, W = pixel_upsample(x, H, W)
            x = x + self.pos_embed[index + 1].to(x.device)
            x = blk(x)
        # (B, N, C) tokens -> (B, C, H, W) feature map.
        x = x.permute(0, 2, 1)
        x = x.view(x.shape[0], self.embed_dim // 81, H, W)
        return x


class Discriminator(nn.Module):
    """Transformer discriminator with two heads: a real/fake score and 2-way class logits.

    Expects input that flattens to (B, 60, 81) tokens — i.e. the generator's
    (B, 60, 9, 9) output, each channel becoming one 81-dim token.
    """

    def __init__(self, img_size=28, patch_size=None, in_chans=1, num_classes=1, embed_dim=None, depth=7,
                 num_heads=4, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
                 drop_path_rate=0., hybrid_backbone=None, norm_layer=nn.LayerNorm):
        super().__init__()
        self.num_classes = num_classes
        # embed_dim is forced to 81 (= 9*9 spatial positions per channel);
        # the constructor argument is effectively ignored.
        self.num_features = embed_dim = self.embed_dim = 81
        patch_size = 4
        # NOTE(review): patch_embed is never used in forward() — the input is
        # flattened directly. Kept so existing checkpoints still load.
        self.patch_embed = nn.Conv2d(1, embed_dim, kernel_size=patch_size, stride=patch_size, padding=0)
        num_patches = 60
        self.reality_token = nn.Parameter(torch.randn(1, 1, embed_dim))
        self.cls_token = nn.Parameter(torch.randn(1, 1, embed_dim))
        # +2 accounts for the reality and class tokens prepended in forward().
        self.pos_embed = nn.Parameter(torch.randn(1, num_patches + 2, embed_dim))
        self.pos_drop = nn.Dropout(p=drop_rate)

        self.blocks = Transformer(dim=embed_dim, dim_head=64, depth=8, heads=num_heads, mlp_dim=embed_dim // 4)
        self.norm = norm_layer(embed_dim)
        # Classifier heads.
        self.reality_head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
        self.cls_head = nn.Linear(embed_dim, 2) if num_classes > 0 else nn.Identity()

    def forward(self, x):
        """Return (reality score in [0,1] of shape (B, num_classes), class probs of shape (B, 2))."""
        B = x.shape[0]
        # (B, C, H, W) -> (B, C, H*W): each channel is one token.
        x = x.flatten(2)

        cls_tokens = self.cls_token.expand(B, -1, -1)
        reality_tokens = self.reality_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)
        # After this second cat the reality token sits at position 0.
        x = torch.cat((reality_tokens, x), dim=1)
        x = x + self.pos_embed
        x = self.pos_drop(x)
        x = self.blocks(x)
        x = self.norm(x)
        # NOTE(review): both heads read the reality token (index 0); the class
        # token at index 1 is never consumed — confirm this is intended.
        x = x[:, 0]
        # Functional ops avoid building throwaway nn.Sigmoid/nn.Softmax
        # modules on every forward pass; results are identical.
        x_out1 = torch.sigmoid(self.reality_head(x))
        x_out2 = F.softmax(self.cls_head(x), dim=1)
        return x_out1, x_out2
