""" Swin Transformer
A MindSpore implementation of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows`
    - https://arxiv.org/pdf/2103.14030

Code from https://github.com/microsoft/Swin-Transformer

"""

import mindspore as ms
import mindspore.nn as nn
import mindspore.ops as ops
import mindspore.numpy as msnp
import numpy as np
from typing import Optional

def drop_path_f(x, drop_prob: float = 0., training: bool = False):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).

    This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
    the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
    See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
    changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
    'survival rate' as the argument.

    Args:
        x: input tensor; the first axis is treated as the batch axis.
        drop_prob: probability of zeroing an entire sample, in [0, 1).
        training: stochastic depth is applied only when True; otherwise x is
            returned unchanged.

    Returns:
        x unchanged when ``drop_prob == 0`` or ``not training``; otherwise a tensor
        where each sample is either zeroed out or rescaled by ``1 / (1 - drop_prob)``
        so the output matches the input in expectation.

    Raises:
        ValueError: if drop_prob is outside [0, 1).
    """
    if not 0. <= drop_prob < 1.:
        # drop_prob == 1 would divide by zero in the rescale below; negative
        # or > 1 probabilities are meaningless — fail loudly instead.
        raise ValueError(f"drop_prob must be in [0, 1), got {drop_prob}")
    if drop_prob == 0. or not training:
        return x
    keep_prob = 1. - drop_prob
    # One Bernoulli draw per sample, broadcast over all remaining axes
    # (works with tensors of any rank, not just 2D ConvNet activations).
    shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    random_tensor = keep_prob + ms.Tensor(np.random.rand(*shape), dtype=x.dtype)
    random_tensor = msnp.floor(random_tensor)  # binarize: 1 with prob keep_prob, else 0
    # Rescale the surviving samples so E[output] == input.
    return x / keep_prob * random_tensor

if __name__ == '__main__':
    # Smoke test: run drop_path on a small tensor in PyNative (eager) mode.
    # Use the named constant instead of the magic number 1 (PYNATIVE_MODE == 1).
    ms.context.set_context(mode=ms.PYNATIVE_MODE, device_target="GPU")
    x = ms.Tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=ms.float32)
    print(drop_path_f(x, 0.2, True))