#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: Xiangtai(lxt@pku.edu.cn)
# MindSpore implementation of the Dual-GCN net
import numpy as np

import mindspore
import mindspore.nn as nn
from mindspore import Parameter
from mindspore import ParameterTuple
from mindspore import RowTensor
from mindspore import Tensor
from mindspore import context
from mindspore import dtype as mstype
from mindspore.communication.management import get_group_size
from mindspore.context import ParallelMode
from mindspore.ops import composite as C
from mindspore.ops import functional as F
from mindspore.ops import operations as P
from mindspore.parallel._auto_parallel_context import auto_parallel_context
BatchNorm3d = nn.BatchNorm3d
BatchNorm2d = nn.BatchNorm2d
BatchNorm1d = nn.BatchNorm1d

def conv3x3(in_planes, out_planes, stride=1):
    """Return a bias-free 3x3 convolution with 1-pixel padding and Xavier init."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        pad_mode='pad',
        padding=1,
        has_bias=False,
        weight_init='xavier_uniform')


class Bottleneck(nn.Cell):
    """Residual bottleneck block: 1x1 reduce -> 3x3 (dilated) -> 1x1 expand.

    Output has ``planes * expansion`` channels. ``fist_dilation`` is kept
    only for signature compatibility with callers; it is not used.
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, fist_dilation=1, multi_grid=1):
        super(Bottleneck, self).__init__()
        # Effective dilation of the middle conv; padding matches it so the
        # spatial size is preserved (up to stride).
        eff_dilation = dilation * multi_grid
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, has_bias=False)
        self.bn1 = BatchNorm2d(planes, eps=1e-05, momentum=0.1, use_batch_statistics=True)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, pad_mode='pad',
                               padding=eff_dilation, dilation=eff_dilation, has_bias=False)
        self.bn2 = BatchNorm2d(planes, eps=1e-05, momentum=0.1, use_batch_statistics=True)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, has_bias=False)
        self.bn3 = BatchNorm2d(planes * 4, eps=1e-05, momentum=0.1, use_batch_statistics=True)
        self.relu = nn.ReLU()
        self.relu_inplace = nn.ReLU()
        self.downsample = downsample
        self.dilation = dilation
        self.stride = stride

    def construct(self, x):
        # Shortcut branch: project the identity when shape/channels change.
        identity = x if self.downsample is None else self.downsample(x)

        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))

        return self.relu_inplace(y + identity)


class SpatialGCN(nn.Cell):
    """Spatial graph reasoning over pixel nodes.

    Projects the input into key/query/value node features, aggregates them
    with batched matmuls plus a softmax, applies a 1D graph convolution, and
    adds the result back to the input through a 1x1 conv with a residual ReLU.
    """

    def __init__(self, plane):
        super(SpatialGCN, self).__init__()
        inter_plane = plane // 2
        # 1x1 projections into the node space (half the channels).
        self.node_k = nn.Conv2d(plane, inter_plane, kernel_size=1, pad_mode='valid', has_bias=True)
        self.node_v = nn.Conv2d(plane, inter_plane, kernel_size=1, pad_mode='valid', has_bias=True)
        self.node_q = nn.Conv2d(plane, inter_plane, kernel_size=1, pad_mode='valid', has_bias=True)

        # Graph-conv weight over the node dimension.
        self.conv_wg = nn.Conv1d(inter_plane, inter_plane, kernel_size=1, has_bias=False)
        self.bn_wg = BatchNorm2d(inter_plane, eps=1e-05, momentum=0.1, use_batch_statistics=True)
        self.softmax = mindspore.ops.Softmax(axis=2)

        # Re-projection back to the input channel count.
        self.out = nn.SequentialCell([nn.Conv2d(inter_plane, plane, kernel_size=1, has_bias=True),
                                      BatchNorm2d(plane, eps=1e-05, momentum=0.1, use_batch_statistics=True)])
        self.reshape = mindspore.ops.Reshape()
        self.transpose = mindspore.ops.Transpose()
        self.bmm = mindspore.ops.BatchMatMul()
        self.relu = nn.ReLU()

    def construct(self, x):
        k = self.node_k(x)
        v = self.node_v(x)
        q = self.node_q(x)
        b, c, h, w = k.shape
        # Flatten the spatial grid into a node axis: k/v -> (b, hw, c), q -> (b, c, hw).
        k = self.transpose(self.reshape(k, (b, c, -1)), (0, 2, 1))
        q = self.reshape(q, (b, c, -1))
        v = self.transpose(self.reshape(v, (b, c, -1)), (0, 2, 1))
        # Attention-style aggregation: softmax(q @ v) folded back through k.
        agg = self.softmax(self.bmm(q, v))
        agg = self.transpose(self.bmm(k, agg), (0, 2, 1))
        agg = self.conv_wg(agg)
        # BatchNorm2d stands in for a 1D norm: append a unit axis, normalize, drop it.
        agg = mindspore.ops.Squeeze(-1)(self.bn_wg(mindspore.ops.ExpandDims()(agg, -1)))
        agg = self.reshape(agg, (b, c, h, -1))
        return self.relu(self.out(agg) + x)


class DualGCN(nn.Cell):
    """
        Feature GCN with coordinate GCN.

        Two branches over the same input:
        - a local spatial branch: three stride-2 depthwise convs (8x downsample),
          SpatialGCN reasoning, bilinear upsample back, used as an attention map;
        - a projection-space branch: phi/theta project features into an
          interaction space where graph convolutions (adjacency + state update)
          run, then the result is re-projected and added residually.
        The two branch outputs are concatenated and fused by a 1x1 conv.

        Args:
            planes: number of input (and output) channels.
            ratio: channel-reduction ratio for the interaction space.
    """
    def __init__(self, planes, ratio=4):
        super(DualGCN, self).__init__()

        # phi: projects to 2*(planes//ratio) channels; theta: to planes//ratio.
        self.phi = nn.Conv2d(planes, planes // ratio * 2, kernel_size=1, has_bias=False)
        self.bn_phi = BatchNorm2d(planes // ratio * 2, eps=1e-05, momentum=0.1, use_batch_statistics=True)
        self.theta = nn.Conv2d(planes, planes // ratio, kernel_size=1, has_bias=False)
        self.bn_theta = BatchNorm2d(planes // ratio, eps=1e-05, momentum=0.1, use_batch_statistics=True)

        #  Interaction Space
        #  Adjacency Matrix: (-)A_g
        self.conv_adj = nn.Conv1d(planes // ratio, planes // ratio, kernel_size=1, has_bias=False)
        self.bn_adj = BatchNorm2d(planes // ratio, eps=1e-05, momentum=0.1, use_batch_statistics=True)

        #  State Update Function: W_g
        self.conv_wg = nn.Conv1d(planes // ratio * 2, planes // ratio * 2, kernel_size=1, has_bias=False)
        self.bn_wg = BatchNorm2d(planes // ratio * 2, eps=1e-05, momentum=0.1, use_batch_statistics=True)

        #  last fc: re-projection back to `planes` channels
        self.conv3 = nn.Conv2d(planes // ratio * 2, planes, kernel_size=1, has_bias=False)
        self.bn3 = BatchNorm2d(planes, eps=1e-05, momentum=0.1, use_batch_statistics=True)

        # Local branch: three stride-2 depthwise (group=planes) convs, 8x downsample.
        self.local = nn.SequentialCell([
            nn.Conv2d(planes, planes, 3, group=planes, stride=2, pad_mode='pad', padding=1, has_bias=False),
            BatchNorm2d(planes, eps=1e-05, momentum=0.1, use_batch_statistics=True),
            nn.Conv2d(planes, planes, 3, group=planes, stride=2, pad_mode='pad', padding=1, has_bias=False),
            BatchNorm2d(planes, eps=1e-05, momentum=0.1, use_batch_statistics=True),
            nn.Conv2d(planes, planes, 3, group=planes, stride=2, pad_mode='pad', padding=1, has_bias=False),
            BatchNorm2d(planes, eps=1e-05, momentum=0.1, use_batch_statistics=True)])
        self.gcn_local_attention = SpatialGCN(planes)

        # Fuses the concatenated [local, global] branches back to `planes` channels.
        self.final = nn.SequentialCell([nn.Conv2d(planes * 2, planes, kernel_size=1, has_bias=False),
                                        BatchNorm2d(planes, eps=1e-05, momentum=0.1, use_batch_statistics=True)])
        self.reshape = mindspore.ops.Reshape()
        self.transpose = mindspore.ops.Transpose()
        self.matmul = nn.MatMul()
        self.relu = nn.ReLU()


    def to_matrix(self, x):
        # Flatten (n, c, h, w) -> (n, c, h*w) so spatial positions become graph nodes.
        n, c, h, w = x.shape
        shape = (n, c, -1)
        x = self.reshape(x, shape)
        return x

    def construct(self, feat):
        # # # # Local # # # #
        x = feat
        local = self.local(feat)
        local = self.gcn_local_attention(local)
        # Upsample the local attention back to the input resolution.
        resize_bilinear = mindspore.ops.ResizeBilinear(size=x.shape[2:], align_corners=True)
        local = resize_bilinear(local)
        # Use the local branch as a residual attention map: x * local + x.
        spatial_local_feat = x * local + x

        # # # # Projection Space # # # #
        x_sqz, b = x, x

        x_sqz = self.phi(x_sqz)
        x_sqz = self.bn_phi(x_sqz)
        x_sqz = self.to_matrix(x_sqz)

        b = self.theta(b)
        b = self.bn_theta(b)
        b = self.to_matrix(b)
        btr = self.transpose(b, (0, 2, 1))

        # Project: z_idt has shape (n, 2*planes//ratio, planes//ratio).
        z_idt = self.matmul(x_sqz, btr)

        # # # # Interaction Space # # # #
        z = self.transpose(z_idt, (0, 2, 1))

        z = self.conv_adj(z)
        # BatchNorm2d used as a 1D norm: add a trailing unit axis, normalize, drop it.
        z = mindspore.ops.Squeeze(-1)(self.bn_adj(mindspore.ops.ExpandDims()(z, -1)))
        z = self.transpose(z, (0, 2, 1))

        # Laplacian smoothing: (I - A_g)Z => Z - A_gZ
        z += z_idt

        z = self.conv_wg(z)
        z = mindspore.ops.Squeeze(-1)(self.bn_wg(mindspore.ops.ExpandDims()(z, -1)))

        # # # # Re-projection Space # # # #
        # Re-project the interaction-space state back onto the spatial nodes.
        y = self.matmul(z, b)

        n, _, h, w = x.shape
        y = self.reshape(y, (n, -1, h, w))

        y = self.conv3(y)
        y = self.bn3(y)

        # Residual add of the global-reasoning branch.
        g_out = self.relu(x+y)

        # cat or sum, nearly the same results
        cat = mindspore.ops.Concat(1)
        out = self.final(cat((spatial_local_feat, g_out)))

        return out


class DualGCNHead(nn.Cell):
    """Segmentation head: 3x3 conv -> DualGCN -> 3x3 conv, then classify.

    The refined feature is concatenated with the raw input feature and a
    bottleneck maps the pair to per-class logits.

    Args:
        inplanes: channels of the backbone feature fed to the head.
        interplanes: internal channel width of the GCN branch.
        num_classes: number of output classes.
    """

    def __init__(self, inplanes, interplanes, num_classes):
        super(DualGCNHead, self).__init__()
        self.conva = nn.SequentialCell([nn.Conv2d(inplanes, interplanes, 3, pad_mode='pad', padding=1, has_bias=False),
                                   BatchNorm2d(interplanes, eps=1e-05, momentum=0.1, use_batch_statistics=True),
                                   nn.ReLU()])
        self.dualgcn = DualGCN(interplanes)
        self.convb = nn.SequentialCell([nn.Conv2d(interplanes, interplanes, 3, pad_mode='pad', padding=1, has_bias=False),
                                   BatchNorm2d(interplanes, eps=1e-05, momentum=0.1, use_batch_statistics=True),
                                   nn.ReLU()])

        self.bottleneck = nn.SequentialCell([
            nn.Conv2d(inplanes + interplanes, interplanes, kernel_size=3, pad_mode='pad', padding=1, dilation=1, has_bias=False),
            BatchNorm2d(interplanes, eps=1e-05, momentum=0.1, use_batch_statistics=True),
            nn.ReLU(),
            # Fix: the classifier input width was hard-coded to 512, which only
            # worked when interplanes happened to be 512; use interplanes.
            nn.Conv2d(interplanes, num_classes, kernel_size=1, stride=1, pad_mode='pad', padding=0, has_bias=True)
        ])

    def construct(self, x):
        output = self.conva(x)
        output = self.dualgcn(output)
        output = self.convb(output)
        cat = mindspore.ops.Concat(1)
        # Fuse the raw input with the GCN-refined feature before classifying.
        output = self.bottleneck(cat((x, output)))
        return output


class ResNet(nn.Cell):
    """Dilated ResNet backbone (deep stem) with a DualGCN segmentation head.

    Args:
        block: residual block class (e.g. ``Bottleneck``).
        layers: blocks per stage, e.g. ``[3, 4, 23, 3]`` for ResNet-101.
        num_classes: number of segmentation classes.
        is_train: when True, ``construct`` also returns the auxiliary
            deeply-supervised (DSN) logits computed from the stage-3 feature.
    """

    def __init__(self, block, layers, num_classes, is_train):
        self.inplanes = 128
        super(ResNet, self).__init__()
        self.is_train = is_train
        # Deep stem: three 3x3 convs (the first with stride 2) instead of one 7x7.
        self.conv1 = nn.SequentialCell([
            conv3x3(3, 64, stride=2),
            BatchNorm2d(64, eps=1e-05, momentum=0.1, use_batch_statistics=True),
            nn.ReLU(),
            conv3x3(64, 64),
            BatchNorm2d(64, eps=1e-05, momentum=0.1, use_batch_statistics=True),
            nn.ReLU(),
            conv3x3(64, 128)])
        self.bn1 = BatchNorm2d(self.inplanes, eps=1e-05, momentum=0.1, use_batch_statistics=True)
        self.relu = nn.ReLU()
        # Fix: the original assigned an identical MaxPool2d to self.maxpool twice.
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode='same')
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        # Stages 3/4 keep stride 1 and use dilation to preserve spatial resolution.
        self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4, multi_grid=(1, 2, 4))

        # DualGCN segmentation head on the final 2048-channel feature map.
        self.head = DualGCNHead(2048, 512, num_classes)

        # Auxiliary (deeply-supervised) classifier on the stage-3 output.
        self.dsn = nn.SequentialCell([
            nn.Conv2d(1024, 512, kernel_size=3, stride=1, pad_mode='pad', padding=1, has_bias=True),
            BatchNorm2d(512, eps=1e-05, momentum=0.1, use_batch_statistics=True),
            nn.Dropout(0.1),
            nn.Conv2d(512, num_classes, kernel_size=1, stride=1, pad_mode='pad', padding=0, has_bias=True)
        ])
        # Debug helpers (kept for compatibility with existing training scripts).
        self.tensor_summary = mindspore.ops.TensorSummary()
        self.is_nan = mindspore.ops.IsNan()
        self.print = mindspore.ops.Print()
        self.abs = mindspore.ops.Abs()

    def _make_layer(self, block, planes, blocks, stride=1, dilation=1, multi_grid=1):
        """Stack ``blocks`` residual blocks; project the identity when shape changes."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.SequentialCell([
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, has_bias=False),
                BatchNorm2d(planes * block.expansion, eps=1e-05, momentum=0.1, use_batch_statistics=True)])

        layers = []
        # Multi-grid dilation: cycle through the tuple; plain int means no multi-grid.
        generate_multi_grid = lambda index, grids: grids[index % len(grids)] if isinstance(grids, tuple) else 1
        layers.append(block(self.inplanes, planes, stride, dilation=dilation, downsample=downsample,
                            multi_grid=generate_multi_grid(0, multi_grid)))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(
                block(self.inplanes, planes, dilation=dilation, multi_grid=generate_multi_grid(i, multi_grid)))

        return nn.SequentialCell(*layers)

    def construct(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x_dsn = self.dsn(x)  # auxiliary logits from the stage-3 feature
        x = self.layer4(x)
        x = self.head(x)

        if self.is_train:
            return [x, x_dsn]
        return [x]


def DualSeg_res101(num_classes=21, is_train=True):
    """Build the DualGCN-headed ResNet-101 (stage depths 3-4-23-3)."""
    return ResNet(Bottleneck, [3, 4, 23, 3], num_classes, is_train)

class TrainOneStepCell(nn.Cell):
    """Wrap a loss network and an optimizer into a single training step.

    ``construct`` runs the forward pass, computes gradients of the loss
    w.r.t. the trainable parameters (scaled by ``scale_sense``), applies the
    optimizer, and returns the loss.

    Args:
        loss: a Cell that takes (data, label) and returns the scalar loss.
        optimizer: the optimizer updating ``loss.trainable_params()``.
        scale_sense: scalar gradient sensitivity (loss-scale factor).
    """

    def __init__(self, loss, optimizer, scale_sense):
        super(TrainOneStepCell, self).__init__()
        self.optimizer = optimizer
        self.loss = loss
        self.loss.set_grad()
        self.loss.set_train()
        self.grad = mindspore.ops.GradOperation(get_by_list=True, sens_param=True)
        # Fix: the original assigned the undefined name `sens` here, which
        # raised NameError at construction; the constructor argument is
        # `scale_sense`.
        self.sens = scale_sense
        self.weights = mindspore.ParameterTuple(loss.trainable_params())

    def construct(self, data, label):
        weights = self.weights
        loss = self.loss(data, label)
        # Broadcast the scalar sensitivity to the loss's dtype and shape.
        sens = mindspore.ops.Fill()(mindspore.ops.DType()(loss), mindspore.ops.Shape()(loss), self.sens)
        grads = self.grad(self.loss, weights)(data, label, sens)
        # Ensure the optimizer update executes before the loss is returned.
        return mindspore.ops.depend(loss, self.optimizer(grads))


# Dispatch table that un-scales gradients by the loss-scale factor.
_grad_scale = C.MultitypeFuncGraph("grad_scale")
reciprocal = P.Reciprocal()


@_grad_scale.register("Tensor", "Tensor")
def tensor_grad_scale(scale, grad):
    """Divide a dense gradient by ``scale`` (multiply by its reciprocal)."""
    return grad * F.cast(reciprocal(scale), F.dtype(grad))

@_grad_scale.register("Tensor", "RowTensor")
def tensor_grad_scale_row_tensor(scale, grad):
    """Divide a sparse (row) gradient's values by ``scale``.

    Fix: ``RowTensor`` was referenced but never imported, so this branch
    raised NameError whenever a sparse gradient reached it; it is now
    imported at the top of the file.
    """
    return RowTensor(grad.indices,
                     grad.values * F.cast(reciprocal(scale), F.dtype(grad.values)),
                     grad.dense_shape)

# Dispatch table for per-gradient overflow (non-finite value) detection.
_grad_overflow = C.MultitypeFuncGraph("_grad_overflow")
grad_overflow = P.FloatStatus()


@_grad_overflow.register("Tensor")
def _tensor_grad_overflow(grad):
    """Return the float-status flag for a dense gradient."""
    return grad_overflow(grad)

@_grad_overflow.register("RowTensor")
def _tensor_grad_overflow_row_tensor(grad):
    """Return the float-status flag for a sparse (row) gradient's values."""
    return grad_overflow(grad.values)

class TrainOneStepWithLossScaleCell(nn.wrap.cell_wrapper.TrainOneStepCell):
    r"""
    Network training with loss scaling.

    Args:
        network (Cell): The training network. The network only supports single output.
        optimizer (Cell): Optimizer for updating the weights.
        scale_sense (Union[Tensor, Cell]): If this value is Cell type, the loss scaling update logic cell.If this value
                                          is Tensor type, Tensor with shape :math:`()` or :math:`(1,)`.

    Inputs:
        - **(*inputs)** (Tuple(Tensor)) - Tuple of input tensors with shape :math:`(N, \ldots)`.

    Outputs:
        Tuple of 3 Tensor, the loss, overflow flag and current loss scaling value.

        - **loss** (Tensor) -  Tensor with shape :math:`()`.
        - **overflow** (Tensor) -  Tensor with shape :math:`()`, type is bool.
        - **loss scaling value** (Tensor) -  Tensor with shape :math:`()`

    Raises:
        TypeError: If `scale_sense` is neither Cell nor Tensor.
        ValueError: If shape of `scale_sense` is neither (1,) nor ().

    Supported Platforms:
        ``Ascend`` ``GPU``
    """
    def __init__(self, network, optimizer, scale_sense):
        super(TrainOneStepWithLossScaleCell, self).__init__(network, optimizer, sens=None)
        self.hyper_map = C.HyperMap()
        # Threshold for the overflow test: flag_sum >= 1 means overflow occurred.
        self.base = Tensor(1, mstype.float32)
        self.reduce_sum = P.ReduceSum(keep_dims=False)
        self.less_equal = P.LessEqual()
        self.allreduce = P.AllReduce()
        # In distributed mode the overflow flag is summed across devices.
        self.is_distributed = (self.parallel_mode != ParallelMode.STAND_ALONE)
        # GPU has no NPU float-status ops; overflow is detected per-gradient instead.
        self.gpu_target = (context.get_context("device_target") == "GPU")
        self.loss_scaling_manager = None
        if isinstance(scale_sense, nn.Cell):
            # Dynamic loss scale: the manager cell updates scale_sense on overflow.
            self.loss_scaling_manager = scale_sense
            self.scale_sense = Parameter(Tensor(scale_sense.get_loss_scale(), dtype=mstype.float32),
                                         name="scale_sense")
        elif isinstance(scale_sense, Tensor):
            # Static loss scale: a scalar (or 1-element) tensor.
            if scale_sense.shape == (1,) or scale_sense.shape == ():
                self.scale_sense = Parameter(scale_sense, name='scale_sense')
            else:
                raise ValueError("The shape of scale_sense must be (1,) or (), but got {}".format(scale_sense.shape))
        else:
            raise TypeError("The scale_sense must be Cell or Tensor, but got {}".format(type(scale_sense)))

    def construct(self, data, label):
        # One training step: forward, scaled backward, un-scale, overflow check,
        # and a conditional optimizer update (skipped on overflow).
        weights = self.weights
        loss = self.network(data, label)
        scaling_sens = self.scale_sense

        # Clear the overflow state after the forward pass, before the backward pass.
        status, scaling_sens = self.start_overflow_check(loss, scaling_sens)

        # Seed the backward pass with sens = loss_scale so gradients are scaled up.
        scaling_sens_filled = C.ones_like(loss) * F.cast(scaling_sens, F.dtype(loss))
        grads = self.grad(self.network, weights)(data, label, scaling_sens_filled)
        # Un-scale the gradients back to their true magnitude.
        grads = self.hyper_map(F.partial(_grad_scale, scaling_sens), grads)
        # apply grad reducer on grads
        grads = self.grad_reducer(grads)

        # get the overflow buffer
        cond = self.get_overflow_status(status, grads)
        overflow = self.process_loss_scale(cond)
        # if there is no overflow, do optimize
        if not overflow:
            # NOTE(review): use_grad_accumulation / grad_accumulation come from the
            # parent TrainOneStepCell — confirm the installed MindSpore version
            # actually provides these attributes.
            if self.use_grad_accumulation:
                loss = self.grad_accumulation(loss, grads)
            else:
                loss = F.depend(loss, self.optimizer(grads))
        return loss, cond, scaling_sens
    def set_sense_scale(self, sens):
        """
        If the user has set the sens in the training process and wants to reassign the value, he can call
        this function again to make modification, and sens needs to be of type Tensor.

        Inputs:
            - **sens** (Tensor) - The new sense whose shape and type are the same with original `scale_sense`.
        """
        if self.scale_sense and isinstance(sens, Tensor):
            self.scale_sense.set_data(sens)
        else:
            raise TypeError("The input type must be Tensor, but got {}".format(type(sens)))

    def start_overflow_check(self, pre_cond, compute_input):
        """
        Start floating-point overflow detection. Create and clear the overflow detection state.

        Specify the argument 'pre_cond' and 'compute_input' to make sure overflow status is cleared at the right time.
        Taking this situation as an example, we need to execute state clearing after loss calculation and then detect
        overflow in the process of gradient calculation. In this case, pre_cond should be the output of the loss
        function, and compute_input should be the input of gradients-computing function.

        Inputs:
            - **pre_cond** (Tensor) - A precondition for starting overflow detection. It determines the executing order
              of overflow state clearing and prior processions. It makes sure that the function 'start_overflow'
              clears status after finishing the process of precondition.
            - **compute_input** (object) - The input of subsequent process. Overflow detection should be performed on a
              certain computation. Set `compute_input` as the input of the computation, to ensure overflow status is
              cleared before executing the computation.

        Outputs:
            Tuple[object, object], the first value is False for GPU backend, while it is a instance of
            NPUAllocFloatStatus for other backend. The status is used to detect overflow during overflow detection.
            The second value is the same as the input of `compute_input`, but contains some information about the
            execution order.
        """
        status = False
        if not self.gpu_target:
            # init overflow buffer
            status = P.NPUAllocFloatStatus()()
            # F.depend enforces ordering: allocate only after pre_cond is computed.
            status = F.depend(status, pre_cond)
            # clear overflow buffer
            clear_status = P.NPUClearFloatStatus()(status)
            compute_input = F.depend(compute_input, clear_status)
        return status, compute_input

    def get_overflow_status(self, status, compute_output):
        """
        Get floating-point overflow status.

        Get overflow results after executing the target process for overflow detection.

        Inputs:
            - **status** (object) - A status instance used to detect the overflow.
            - **compute_output** - Overflow detection should be performed on a certain computation. Set `compute_output`
              as the output of the computation, to ensure overflow status is acquired before executing the
              computation.

        Outputs:
            bool, whether the overflow occurs or not.
        """
        if not self.gpu_target:
            # Ascend path: read the hardware float-status buffer after the computation.
            status = F.depend(status, compute_output)
            get_status = P.NPUGetFloatStatus()(status)
            status = F.depend(status, get_status)
            # sum overflow buffer elements, 0:not overflow , >0:overflow
            flag_sum = self.reduce_sum(status, (0,))
        else:
            # GPU path: check each gradient with FloatStatus and sum the flags.
            flag_sum = self.hyper_map(F.partial(_grad_overflow), compute_output)
            flag_sum = P.AddN()(flag_sum)
            # convert flag_sum to scalar
            flag_sum = P.Reshape()(flag_sum, (()))

        if self.is_distributed:
            # sum overflow flag over devices
            flag_reduce = self.allreduce(flag_sum)
            overflow = self.less_equal(self.base, flag_reduce)
        else:
            overflow = self.less_equal(self.base, flag_sum)
        return overflow

    def process_loss_scale(self, overflow):
        """
        Calculate loss scale according to the overflow.

        Inputs:
            - **overflow** (bool) - Whether the overflow occurs or not.

        Outputs:
            bool, overflow value.
        """
        if self.loss_scaling_manager is not None:
            return self.loss_scaling_manager(self.scale_sense, overflow)
        return overflow

if __name__ == '__main__':
    # Smoke test: push one dummy batch through the network.
    dummy = Tensor(np.zeros([1, 3, 448, 448]), mstype.float32)
    model = DualSeg_res101()
    outputs = model(dummy)
    # Fix: the model returns a list ([main, dsn] when is_train=True), so the
    # original `outputs.shape` raised AttributeError; print each output's shape.
    for out in outputs:
        print(out.shape)

