import mindspore
from mindspore import nn, Tensor
from mindspore import ops as P
from mindspore.common.initializer import initializer, XavierUniform

# Lower bound on the ViT patch count; VitStem asserts that the number of
# patches is strictly greater than this value.
MIN_NUM_PATCHES = 4


class BatchDense(nn.Cell):
    """Dense projection over the last axis of a (batch, seq, dim) input.

    The batch and sequence axes are flattened into one before the matmul
    and restored afterwards, so a plain 2-D ``nn.Dense`` can be reused.
    The weight matrix is re-initialized with the given initializer.
    """

    def __init__(self, in_features, out_features, initialization, has_bias=True):
        super(BatchDense, self).__init__()
        self.out_features = out_features
        self.dense = nn.Dense(in_features, out_features, has_bias=has_bias)
        self.dense.weight.set_data(initializer(initialization, [out_features, in_features]))
        self.reshape = P.Reshape()
        # Number of input features, read back from the weight's last axis.
        self.pixel_values = self.dense.weight.shape[-1]

    def construct(self, x):
        batch, seq, feat = x.shape
        flat = self.reshape(x, (batch * seq, feat))
        projected = self.dense(flat)
        return self.reshape(projected, (batch, seq, self.out_features))


class VitStem(nn.Cell):
    """ViT stem: cut an image into non-overlapping patches and embed them.

    Returns both the embedded patches and the raw flattened patches.
    """

    def __init__(self, dim, patch_size, image_size, channels=3, initialization=XavierUniform()):
        super(VitStem, self).__init__()

        assert image_size % patch_size == 0, 'Image dimensions must be divisible by the patch size.'
        num_patches = (image_size // patch_size) ** 2
        assert num_patches > MIN_NUM_PATCHES, f'your number of patches {num_patches} is too small'

        self.patch_size = patch_size
        self.reshape = P.Reshape()
        self.transpose = P.Transpose()
        # Each flattened patch holds channels * patch_size^2 values.
        patch_dim = channels * patch_size * patch_size
        self.patch_to_embedding = BatchDense(patch_dim, dim, initialization, has_bias=True)

    def construct(self, img):
        bs, channels, h, w = img.shape
        p = self.patch_size
        rows, cols = h // p, w // p
        # (bs, c, h, w) -> (bs, c, rows, p, cols, p) -> (bs, rows, cols, c, p, p)
        grid = self.reshape(img, (bs, channels, rows, p, cols, p))
        grid = self.transpose(grid, (0, 2, 4, 1, 3, 5))
        # Flatten the patch grid into a sequence of per-patch pixel vectors.
        patches = self.reshape(grid, (bs, rows * cols, channels * p * p))
        return self.patch_to_embedding(patches), patches


# Multitype dispatch graph used with HyperMap to apply the same scaling
# operation to every gradient tensor in a gradient tuple.
grad_scale = P.MultitypeFuncGraph("grad_scale")

@grad_scale.register("Tensor", "Tensor")
def gradient_scale(scale, grad):
    """Multiply one gradient by `scale`, cast to the gradient's dtype."""
    # Casting `scale` to grad's dtype avoids an implicit type promotion
    # when gradients are not float32 (e.g. mixed precision).
    return grad * P.cast(scale, P.dtype(grad))

class VitTrainOneStepCell(nn.TrainOneStepCell):
    """One training step with static loss scaling.

    The backward pass is seeded with a sensitivity tensor filled with
    ``sens`` (the loss scale); the resulting gradients are multiplied by
    ``1/sens`` to undo the scaling before the optimizer update.
    """

    def __init__(self, network, optimizer, sens=1.0):
        super(VitTrainOneStepCell, self).__init__(network, optimizer, sens)
        self.hyper_map = P.HyperMap()
        # Precompute 1/sens once so scale_grad only multiplies per step.
        self.reciprocal_sense = Tensor(1 / sens, mindspore.float32)

    def scale_grad(self, gradients):
        """Divide every gradient by the loss scale (multiply by 1/sens)."""
        gradients = self.hyper_map(P.partial(grad_scale, self.reciprocal_sense), gradients)
        return gradients

    def construct(self, *inputs):
        # NOTE(review): self.grad, self.weights, self.grad_reducer and
        # self.sens are presumably provided by nn.TrainOneStepCell — not
        # visible in this file; confirm against the MindSpore base class.
        loss = self.network(*inputs)
        sens = P.fill(loss.dtype, loss.shape, self.sens)
        # calculate gradients, the sens will equal to the loss_scale
        grads = self.grad(self.network, self.weights)(*inputs, sens)
        # gradients / loss_scale
        grads = self.scale_grad(grads)
        # reduce gradients in distributed scenarios
        grads = self.grad_reducer(grads)
        # depend() ties the optimizer update into the graph so it is not
        # pruned; the returned loss is still the scaled network loss.
        loss = P.depend(loss, self.optimizer(grads))
        return loss

