# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np

import mindspore
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import dtype as mstype
from mindspore.ops import operations as P
from mindspore import Parameter, Tensor
from mindspore.common.initializer import Normal, TruncatedNormal, initializer
from mindspore import load_checkpoint, load_param_into_net
from mindspore import context


# Expected input shapes (batch-major, before any transpose):
#   text:  (batch, context_length), e.g. (1000, 77)
#   image: (batch, 3, 224, 224),    e.g. (18, 3, 224, 224)
class QuickGELU(nn.Cell):
    """Sigmoid-based GELU approximation used by CLIP: x * sigmoid(1.702 * x)."""

    def __init__(self):
        super(QuickGELU, self).__init__()
        # Slope constant of the sigmoid approximation to GELU.
        self.ratio = 1.702
        self.sigmoid = nn.Sigmoid()

    def construct(self, x):
        gate = self.sigmoid(x * self.ratio)
        return gate * x


class MultiheadAttention(nn.Cell):
    """Multi-head scaled dot-product self-attention (CLIP style).

    NOTE: q, k and v are all derived from `query` via one fused in-projection,
    so this cell implements *self*-attention only; `key` and `value` are
    accepted for signature parity but the projections come from `query`.
    """

    def __init__(self, d_model, n_head):
        """
        :param d_model: width of tensor/embedding dim
        :param n_head: number of attention heads
        """
        super(MultiheadAttention, self).__init__()
        self.embed_dim = d_model
        self.num_heads = n_head
        self.head_dim = self.embed_dim // self.num_heads
        # Fused q/k/v projection and the output projection.
        self.in_proj = nn.Dense(self.embed_dim, 3 * self.embed_dim)
        self.out_proj = nn.Dense(self.embed_dim, self.embed_dim)
        self.split = ops.Split(-1, 3)
        self.expand_dims = P.ExpandDims()
        self.softmax = nn.Softmax(-1)
        self.transpose = ops.Transpose()
        # 1/sqrt(head_dim) scaling applied to q before the dot product.
        self.scaling = self.head_dim ** -0.5

    def construct(self, query, key, value, attn_mask=None):
        """Run self-attention over `query`.

        :param query: Tensor of shape (tgt_len, bsz, embed_dim), sequence-major.
        :param key: ignored (self-attention; kept for API compatibility).
        :param value: ignored (self-attention; kept for API compatibility).
        :param attn_mask: optional additive mask of shape (tgt_len, tgt_len),
            e.g. a causal mask with -inf above the diagonal; added to the
            attention logits before the softmax.
        :return: Tensor of shape (tgt_len, bsz, embed_dim).
        """
        tgt_len, bsz, embed_dim = query.shape
        qkv = self.in_proj(query).view(tgt_len, bsz, 3, embed_dim).transpose((2, 0, 1, 3))
        q = ops.Squeeze(0)(qkv[0:1])
        k = ops.Squeeze(0)(qkv[1:2])
        v = ops.Squeeze(0)(qkv[2:3])
        q = q * self.scaling
        # (tgt_len, bsz*heads, head_dim) -> (bsz*heads, tgt_len, head_dim)
        q = q.view(tgt_len, bsz * self.num_heads, self.head_dim).transpose((1, 0, 2))
        k = k.view(-1, bsz * self.num_heads, self.head_dim).transpose((1, 0, 2))
        v = v.view(-1, bsz * self.num_heads, self.head_dim).transpose((1, 0, 2))
        attn_output_weights = ops.matmul(q, k.transpose((0, 2, 1)))  # (B*H, L, L)
        if attn_mask is not None:
            # Additive mask broadcasts over the (bsz*heads) leading dim.
            attn_output_weights = attn_output_weights + self.expand_dims(attn_mask, 0)
        attn_output_weights = self.softmax(attn_output_weights)
        attn_output = ops.matmul(attn_output_weights, v)  # (B*H, L, head_dim)
        attn_output = self.transpose(attn_output, (1, 0, 2))
        attn_output = attn_output.view(tgt_len, bsz, embed_dim)
        return self.out_proj(attn_output)


class AttentionWithMask(nn.Cell):
    """Self-attention that always applies a fixed (e.g. causal) additive mask."""

    def __init__(self, d_model, n_head, attn_mask):
        super(AttentionWithMask, self).__init__()
        self.attn = MultiheadAttention(d_model, n_head)
        self.attn_mask = attn_mask

    def construct(self, x):
        # Bug fix: the mask used to be passed as `value` (which the attention
        # cell ignored), so the causal mask was never actually applied.
        return self.attn(x, x, x, self.attn_mask)

class ResidualAttentionBlock(nn.Cell):
    """Pre-LN transformer block: masked self-attention and an MLP, each with a
    residual connection (attribute names match CLIP checkpoint keys)."""

    def __init__(self, d_model, n_head, attn_mask):
        super(ResidualAttentionBlock, self).__init__()
        self.attn = AttentionWithMask(d_model, n_head, attn_mask)
        self.ln_1 = nn.LayerNorm([d_model])
        # 4x-expansion feed-forward with QuickGELU, as in CLIP.
        self.c_fc = nn.Dense(d_model, d_model * 4)
        self.gelu = QuickGELU()
        self.c_proj = nn.Dense(d_model * 4, d_model)
        self.mlp = nn.SequentialCell([self.c_fc, self.gelu, self.c_proj])
        self.ln_2 = nn.LayerNorm([d_model])

    def construct(self, x):
        attn_out = self.attn(self.ln_1(x))
        x = x + attn_out
        mlp_out = self.mlp(self.ln_2(x))
        return x + mlp_out


class Transformer(nn.Cell):
    """Stack of `layers` ResidualAttentionBlocks of the given `width`.

    :param width: embedding dim fed to every block.
    :param layers: number of residual attention blocks.
    :param heads: attention heads per block.
    :param attn_mask: additive attention mask shared by all blocks (or None).
    """

    def __init__(self, width, layers, heads, attn_mask):
        super(Transformer, self).__init__()
        self.width = width
        self.layers = layers
        self.resblocks = nn.SequentialCell(
            *[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)]
        )

    def construct(self, x):
        # x: (seq_len, batch, width), sequence-major.
        return self.resblocks(x)

    def init_weights(self):
        """Re-initialize projection weights with CLIP's depth-scaled normal init.

        Bug fix: the previous implementation called the PyTorch-only
        `nn.init.normal_` and used attribute names that do not exist on these
        MindSpore cells (`attn.in_proj_weight`, `mlp.c_fc`). MindSpore
        parameters are re-initialized via `Parameter.set_data(initializer(...))`.
        """
        proj_std = (self.width ** -0.5) * ((2 * self.layers) ** -0.5)
        attn_std = self.width ** -0.5
        fc_std = (2 * self.width) ** -0.5

        def _renorm(param, std):
            # In-place re-init preserving the parameter's shape and dtype.
            param.set_data(initializer(Normal(sigma=std), param.shape, param.dtype))

        for block in self.resblocks:
            _renorm(block.attn.attn.in_proj.weight, attn_std)
            _renorm(block.attn.attn.out_proj.weight, proj_std)
            _renorm(block.c_fc.weight, fc_std)
            _renorm(block.c_proj.weight, proj_std)


class TextTransformer(nn.Cell):
    """The official CLIP language model: token + positional embedding, a masked
    transformer, a final LayerNorm, and a projection to the joint embed space."""

    def __init__(self, context_length, vocab_size, output_dim, width, layers, heads, pretrained=None, **kwargs):
        """
        :param context_length: max token sequence length (e.g. 77).
        :param vocab_size: tokenizer vocabulary size.
        :param output_dim: dimension of the projected text embedding.
        :param width: transformer width (embedding dim).
        :param layers: number of transformer blocks.
        :param heads: attention heads per block.
        :param pretrained: optional checkpoint path for `init_weights_`.
        """
        super(TextTransformer, self).__init__()
        self.width = width
        self.vocab_size = vocab_size
        self.token_embedding = nn.Embedding(vocab_size, self.width)
        self.positional_embedding = Parameter(initializer(TruncatedNormal(0.01), [context_length, self.width]))
        self.ln_final = nn.LayerNorm([self.width])
        self.text_projection = Parameter(initializer(TruncatedNormal(0.01), [self.width, output_dim]))
        self.transformer_layer = Transformer(width, layers, heads, self.build_attention_mask(context_length))
        # Bug fix: `self.cast` was used in construct() but never defined.
        self.cast = ops.Cast()

    @staticmethod
    def build_attention_mask(context_length):
        """Return a (context_length, context_length) causal mask: 0 on/below
        the diagonal, -inf above, to be added to attention logits."""
        mask = np.triu(np.full((context_length, context_length), -np.inf).astype(np.float32), 1)
        mask = Tensor(mask)
        return mask

    def init_weights_(self, pretrained=None):
        """Load `pretrained` checkpoint if given, otherwise re-init weights.

        Bug fix: `nn.init.normal_` is a PyTorch API, and MindSpore's
        nn.Embedding stores its weights in `embedding_table`, not `weight`;
        use `Parameter.set_data(initializer(Normal(...)))` instead.
        """
        if pretrained:
            param_dict = load_checkpoint(pretrained)
            load_param_into_net(self, param_dict)
            return
        tbl = self.token_embedding.embedding_table
        tbl.set_data(initializer(Normal(sigma=0.02), tbl.shape, tbl.dtype))
        self.positional_embedding.set_data(
            initializer(Normal(sigma=0.01),
                        self.positional_embedding.shape, self.positional_embedding.dtype))
        if self.text_projection is not None:
            self.text_projection.set_data(
                initializer(Normal(sigma=self.width ** -0.5),
                            self.text_projection.shape, self.text_projection.dtype))

    def construct(self, text):
        """Encode `text` (batch, context_length) int token ids to
        (batch, output_dim) embeddings."""
        x = self.token_embedding(text)
        x = x + self.positional_embedding
        x = x.transpose(1, 0, 2)  # NLD -> LND
        x = self.transformer_layer(x)
        x = x.transpose(1, 0, 2)  # LND -> NLD
        x = self.ln_final(x)
        # Take the features at each sequence's last non-pad token
        # (assumes token id 0 is padding — TODO confirm tokenizer contract).
        eot_idx = self.cast(self.cast(text != 0, mstype.float32).sum(axis=-1), mstype.int32) - 1
        x = x[nn.Range(x.shape[0])(), eot_idx]
        x = ops.matmul(x, self.text_projection)
        return x


if __name__ == '__main__':
    # Smoke test: encode a dummy batch of token ids and print the output shape.
    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
    model = TextTransformer(
        context_length=77,
        vocab_size=49408,
        width=512,
        output_dim=512,
        heads=8,
        layers=12,
    )
    dummy_tokens = Tensor(np.ones((1000, 77)), mindspore.int32)
    output = model(dummy_tokens)
    print(output.shape)
