import mindspore
import mindspore.nn as nn
import mindspore.ops.operations as P
import mindspore.common.initializer as weight_init
from diff_aug import DiffAugment
from mindspore import Parameter, Tensor
import numpy as np
import mindspore.ops as ops

class MLP(nn.Cell):
    """Two-layer feed-forward network: Dense -> GELU -> Dense -> Dropout.

    Hidden and output widths default to the input width when not given.
    `dropout` is the drop probability (converted to keep_prob internally).
    """
    def __init__(self, in_feat, hid_feat=None, out_feat=None,
                 dropout=0.):
        super().__init__()
        hidden = hid_feat if hid_feat else in_feat
        output = out_feat if out_feat else in_feat
        self.fc1 = nn.Dense(in_channels=in_feat, out_channels=hidden)
        self.act = nn.GELU()
        self.fc2 = nn.Dense(in_channels=hidden, out_channels=output)
        self.droprateout = nn.Dropout(keep_prob=1.0 - dropout)

    def construct(self, x):
        # Expand, apply the GELU nonlinearity, project back, then dropout.
        h = self.act(self.fc1(x))
        out = self.fc2(h)
        return self.droprateout(out)


class Attention(nn.Cell):
    """Multi-head self-attention over a (batch, tokens, dim) input.

    A single bias-free Dense layer produces q, k and v jointly; attention
    weights are softmax(q·kᵀ * scale) with dropout, and the merged heads go
    through an output projection followed by dropout.
    """
    def __init__(self, dim, heads=4, attention_dropout=0., proj_dropout=0.):
        super().__init__()
        self.heads = heads
        # NOTE(review): scales by the full embedding dim rather than the
        # per-head dim — kept as-is to preserve the trained behavior.
        self.scale = 1./dim**0.5

        self.qkv = nn.Dense(in_channels=dim, out_channels=dim * 3, has_bias=False)
        self.attention_dropout = nn.Dropout(keep_prob=1.0 - attention_dropout)
        self.out = nn.SequentialCell([
            nn.Dense(in_channels=dim, out_channels=dim),
            nn.Dropout(keep_prob=1.0 - proj_dropout)
        ])

    def construct(self, x):
        batch, tokens, channels = x.shape
        head_dim = channels // self.heads
        # Reshape the joint projection to (3, B, heads, N, head_dim) so the
        # leading axis unpacks into q, k, v.
        qkv = self.qkv(x).reshape(batch, tokens, 3, self.heads, head_dim)
        q, k, v = P.Transpose()(qkv, (2, 0, 3, 1, 4,))

        # Scaled dot-product attention.
        scores = ops.BatchMatMul(transpose_a=False, transpose_b=True)(q, k) * self.scale
        weights = self.attention_dropout(ops.Softmax()(scores))

        # Merge heads back into (B, N, C) and project.
        merged = ops.BatchMatMul()(weights, v)
        merged = P.Transpose()(merged, (0, 2, 1, 3)).reshape(batch, tokens, channels)
        return self.out(merged)

class ImgPatches(nn.Cell):
    """Splits an image into non-overlapping patches and embeds each one.

    A convolution with kernel_size == stride == patch_size is equivalent to a
    linear projection of every patch; the feature map is then flattened and
    transposed to a (batch, num_patches, dim) token sequence.
    """
    def __init__(self, input_channel=3, dim=768, patch_size=4):
        super().__init__()
        self.patch_embed = nn.Conv2d(in_channels=input_channel, out_channels=dim, kernel_size=patch_size, stride=patch_size, pad_mode='pad', has_bias=True)

    def construct(self, img):
        embedded = self.patch_embed(img)          # (B, dim, H/p, W/p)
        b, c, h, w = P.Shape()(embedded)
        flat = embedded.view((b, c, h * w))       # (B, dim, N)
        return P.Transpose()(flat, (0, 2, 1))     # (B, N, dim)

def UpSampling(x, H, W):
    """Double the spatial resolution of a token sequence.

    Takes x of shape (B, H*W, C) and returns ((B, (2H)*(2W), C/4), 2H, 2W).
    NOTE(review): this uses DepthToSpace where the original torch code (per
    the commented-out lines it replaced) used nn.PixelShuffle — the two
    order channels differently; confirm checkpoints match this layout.
    """
    _, n_tokens, channels = P.Shape()(x)
    # Token layout (B, N, C) -> image layout (B, C, H, W).
    grid = P.Transpose()(x, (0, 2, 1)).view(-1, channels, H, W)
    grid = P.DepthToSpace(2)(grid)
    _, new_c, new_h, new_w = P.Shape()(grid)
    # Back to token layout (B, N', C').
    tokens = P.Transpose()(grid.view(-1, new_c, new_h * new_w), (0, 2, 1))
    return tokens, new_h, new_w

class Encoder_Block(nn.Cell):
    """Pre-norm transformer encoder block.

    x -> x + Attention(LN(x)) -> (+) MLP(LN(·)), i.e. two residual branches,
    each fed by its own LayerNorm.
    """
    def __init__(self, dim, heads, mlp_ratio=4, drop_rate=0.):
        super().__init__()
        self.ln1 = nn.LayerNorm(normalized_shape=(dim, ), epsilon=1e-05)
        self.attn = Attention(dim, heads, drop_rate, drop_rate)
        self.ln2 = nn.LayerNorm(normalized_shape=(dim, ), epsilon=1e-05)
        self.mlp = MLP(dim, dim*mlp_ratio, dropout=drop_rate)

    def construct(self, x):
        # Residual branch 1: self-attention on the normalized input.
        x = x + self.attn(self.ln1(x))
        # Residual branch 2: feed-forward on the normalized input.
        x = x + self.mlp(self.ln2(x))
        return x


class TransformerEncoder(nn.Cell):
    """A stack of `depth` identical Encoder_Block cells applied in sequence."""
    def __init__(self, depth, dim, heads, mlp_ratio=4, drop_rate=0.):
        super().__init__()
        self.Encoder_Blocks = nn.CellList([
            Encoder_Block(dim, heads, mlp_ratio, drop_rate)
            for _ in range(depth)])

    def construct(self, x):
        # Note: the original loop variable shadowed the Encoder_Block class;
        # renamed here for clarity.
        for block in self.Encoder_Blocks:
            x = block(x)
        return x

class Generator(nn.Cell):
    """TransGAN-style generator: latent vector -> (3, 4*initial_size, 4*initial_size) image.

    A Dense layer maps the latent code to an initial_size x initial_size grid of
    dim-channel tokens. Three transformer stages process the tokens, with an
    UpSampling step (2x spatial, channels/4) between stages, so the final stage
    works at 4*initial_size resolution with dim//16 channels. A 1x1 convolution
    maps the result to 3 image channels.

    Args:
        depth1, depth2, depth3: encoder depth of each transformer stage.
        initial_size: side length of the initial token grid.
        dim: token embedding width of the first stage.
        heads: attention heads per encoder block.
        mlp_ratio: hidden expansion factor of each block's MLP.
        drop_rate: dropout probability used throughout.
        latent_dim: size of the input noise vector (default 1024, as before).
    """
    def __init__(self, depth1=5, depth2=4, depth3=2, initial_size=8, dim=384, heads=4, mlp_ratio=4, drop_rate=0., latent_dim=1024):
        super(Generator, self).__init__()

        self.initial_size = initial_size
        self.dim = dim
        self.depth1 = depth1
        self.depth2 = depth2
        self.depth3 = depth3
        self.heads = heads
        self.mlp_ratio = mlp_ratio
        self.droprate_rate = drop_rate

        self.mlp = nn.Dense(in_channels=latent_dim, out_channels=self.initial_size ** 2 * self.dim)

        # BUG FIX: these shapes previously hard-coded 8 and 384, silently
        # mismatching any non-default initial_size/dim; derive them instead.
        self.positional_embedding_1 = Parameter(Tensor(np.zeros((1, initial_size ** 2, dim)), dtype=mindspore.dtype.float32))
        self.positional_embedding_2 = Parameter(Tensor(np.zeros((1, (initial_size * 2) ** 2, dim // 4)), dtype=mindspore.dtype.float32))
        self.positional_embedding_3 = Parameter(Tensor(np.zeros((1, (initial_size * 4) ** 2, dim // 16)), dtype=mindspore.dtype.float32))

        self.TransformerEncoder_encoder1 = TransformerEncoder(depth=self.depth1, dim=self.dim, heads=self.heads, mlp_ratio=self.mlp_ratio, drop_rate=self.droprate_rate)
        self.TransformerEncoder_encoder2 = TransformerEncoder(depth=self.depth2, dim=self.dim//4, heads=self.heads, mlp_ratio=self.mlp_ratio, drop_rate=self.droprate_rate)
        self.TransformerEncoder_encoder3 = TransformerEncoder(depth=self.depth3, dim=self.dim//16, heads=self.heads, mlp_ratio=self.mlp_ratio, drop_rate=self.droprate_rate)

        # 1x1 conv to map the final dim//16 channels to a 3-channel image.
        self.linear = nn.SequentialCell([nn.Conv2d(in_channels=self.dim // 16, out_channels=3, kernel_size=1, stride=1, pad_mode='pad', padding=0, has_bias=True)])

    def construct(self, noise):
        # Latent -> initial token grid (B, initial_size**2, dim).
        x = self.mlp(noise).view(-1, self.initial_size ** 2, self.dim)

        # Stage 1 at initial_size resolution.
        x = x + self.positional_embedding_1
        H, W = self.initial_size, self.initial_size
        x = self.TransformerEncoder_encoder1(x)

        # Stage 2 at 2x resolution, dim//4 channels.
        x, H, W = UpSampling(x, H, W)
        x = x + self.positional_embedding_2
        x = self.TransformerEncoder_encoder2(x)

        # Stage 3 at 4x resolution, dim//16 channels.
        x, H, W = UpSampling(x, H, W)
        x = x + self.positional_embedding_3
        x = self.TransformerEncoder_encoder3(x)

        # Tokens -> image layout, then the 1x1 conv head.
        x = self.linear(P.Reshape()(P.Transpose()(x, (0, 2, 1,)), (-1, self.dim//16, H, W,)))

        return x

class Discriminator(nn.Cell):
    """ViT-style discriminator.

    Applies DiffAugment, embeds image patches, prepends a learnable class
    token, adds a learnable position embedding, runs a transformer encoder,
    and classifies from the class-token position.

    Args:
        diff_aug: augmentation policy string passed to DiffAugment.
        image_size: input image side length (must be divisible by patch_size).
        patch_size: side length of each square patch.
        input_channel: number of input image channels.
        num_classes: output width of the classification head.
        dim: token embedding width.
        depth: number of encoder blocks.
        heads: attention heads per block.
        mlp_ratio: hidden expansion factor of each block's MLP.
        drop_rate: dropout probability used throughout.

    Raises:
        ValueError: if image_size is not divisible by patch_size.
    """
    def __init__(self, diff_aug, image_size=32, patch_size=4, input_channel=3, num_classes=1,
                 dim=384, depth=7, heads=4, mlp_ratio=4,
                 drop_rate=0.):
        super().__init__()
        if image_size % patch_size != 0:
            raise ValueError('Image size must be divisible by patch size.')
        num_patches = (image_size//patch_size) ** 2
        self.diff_aug = diff_aug
        self.patch_size = patch_size
        self.depth = depth
        self.dim = dim  # needed to broadcast the class token in construct
        # Image patches and embedding layer.
        self.patches = ImgPatches(input_channel, dim, self.patch_size)

        # Learnable position embedding (num_patches + 1 class slot) and class
        # token, both initialized from a truncated normal with sigma=0.2.
        self.positional_embedding = Parameter(Tensor(np.zeros((1, num_patches+1, dim)), dtype=mindspore.dtype.float32))
        self.class_embedding = Parameter(Tensor(np.zeros((1, 1, dim)), dtype=mindspore.dtype.float32))

        self.positional_embedding.set_data(weight_init.initializer(weight_init.TruncatedNormal(0.2),
                                                                   self.positional_embedding.shape,
                                                                   self.positional_embedding.dtype))
        self.class_embedding.set_data(weight_init.initializer(weight_init.TruncatedNormal(0.2),
                                                              self.class_embedding.shape,
                                                              self.class_embedding.dtype))

        self.droprate = nn.Dropout(keep_prob=1.0 - drop_rate)
        self.TransfomerEncoder = TransformerEncoder(depth, dim, heads,
                                      mlp_ratio, drop_rate)
        self.norm = nn.LayerNorm(normalized_shape=(dim, ), epsilon=1e-05)
        self.out = nn.Dense(in_channels=dim, out_channels=num_classes)

    def construct(self, x):
        x = DiffAugment(x, self.diff_aug)
        b = x.shape[0]
        # BUG FIX: the broadcast target previously hard-coded 384, breaking
        # any dim != 384; debug print statements removed as well.
        cls_token = ops.BroadcastTo((b, -1, self.dim))(self.class_embedding)

        x = self.patches(x)                  # (b, num_patches, dim)
        x = P.Concat(1)((cls_token, x))      # prepend the class token
        x += self.positional_embedding
        x = self.droprate(x)
        x = self.TransfomerEncoder(x)
        x = self.norm(x)
        # Classify from the class-token position only.
        x = self.out(x[:, 0])
        return x
