# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import mindspore
import mindspore as ms
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import Tensor,Parameter
from mindspore.ops import composite as C
from mindspore.ops import functional as F
from mindspore import context
from mindspore.context import ParallelMode
from mindspore.parallel._auto_parallel_context import auto_parallel_context
from mindspore.communication.management import get_group_size


class InvBlockExp(nn.Cell):
    """
    Invertible affine coupling block (exponential-scaling variant).

    The input channels are split into two parts of ``channel_split_num`` and
    ``channel_num - channel_split_num`` channels. Three caller-built
    sub-networks provide the coupling terms: F (additive on part 1),
    G (additive on part 2) and H (log-scale on part 2, squashed by a
    clamped sigmoid).
    """
    def __init__(self, subnet_constructor, channel_num, channel_split_num, clamp=1.):
        super(InvBlockExp, self).__init__()
        self.split_len1 = channel_split_num
        self.split_len2 = channel_num - channel_split_num
        # Bound on the log-scale magnitude produced by H.
        self.clamp = clamp

        # Coupling sub-networks built by the caller-supplied constructor.
        self.F = subnet_constructor(self.split_len2, self.split_len1)
        self.G = subnet_constructor(self.split_len1, self.split_len2)
        self.H = subnet_constructor(self.split_len1, self.split_len2)

        self.sigmoid = ops.Sigmoid()
        self.mul = ops.Mul()
        self.exp = ops.Exp()
        self.div = ops.Div()
        self.cat = ops.Concat(axis=1)

    def construct(self, x, rev=False):
        """Apply the coupling forward (rev=False) or its exact inverse (rev=True)."""
        part1 = x[:, 0:self.split_len1]
        part2 = x[:, self.split_len1:(self.split_len1 + self.split_len2)]

        if rev:
            # Inverse pass: undo the forward operations in reverse order.
            scale = self.clamp * (self.sigmoid(self.H(part1)) * 2 - 1)
            out2 = self.div(part2 - self.G(part1), self.exp(scale))
            out1 = part1 - self.F(out2)
        else:
            out1 = part1 + self.F(part2)
            # sigmoid*2 - 1 maps to (-1, 1); clamp bounds the log-scale.
            scale = self.clamp * (self.sigmoid(self.H(out1)) * 2 - 1)
            out2 = self.mul(part2, self.exp(scale)) + self.G(out1)

        return self.cat((out1, out2))


class HaarDownsampling(nn.Cell):
    """
    Invertible 2x downsampling via a fixed 2x2 Haar filter bank.

    The forward pass applies four orthogonal 2x2 filters (LL, LH, HL, HH)
    per input channel with stride 2, turning (N, C, H, W) into
    (N, 4*C, H/2, W/2); the reverse pass inverts it with the transposed
    grouped convolution.
    """
    def __init__(self, channel_in):
        super(HaarDownsampling, self).__init__()
        self.channel_in = channel_in
        self.transpose = ops.Transpose()

        # Fixed (non-trainable) 2x2 Haar analysis filters, one output
        # channel per subband: LL, LH, HL, HH. Same values the original
        # built by mutating a ones tensor.
        self.haar_weight = Tensor([[[[1., 1.], [1., 1.]]],
                                   [[[1., -1.], [1., -1.]]],
                                   [[[1., 1.], [-1., -1.]]],
                                   [[[1., -1.], [-1., 1.]]]], ms.float32)

        self.haar_weights = Parameter(self.haar_weight)
        self.haar_weights.requires_grad = False
        self.groupConv = GroupConv(out_channels=self.haar_weights.shape[0] * self.channel_in,
                                   in_channels=self.channel_in,
                                   kernel_size=self.haar_weights.shape[2],
                                   stride=2, weights=self.haar_weights,
                                   group=self.channel_in)
        self.groupConvTranspose = GroupConvTranspose(in_channels=self.channel_in,
                                                     out_channels=self.haar_weights.shape[0] * self.channel_in,
                                                     kernel_size=self.haar_weights.shape[2],
                                                     stride=2, weights=self.haar_weights,
                                                     group=self.channel_in)

    def construct(self, x, rev=False):
        """Downsample (rev=False) or upsample back (rev=True)."""
        perm = (0, 2, 1, 3, 4)
        if not rev:
            # Grouped Haar conv (normalized by 4), then reorder channels so
            # the four subbands of every input channel become contiguous.
            y = self.groupConv(x) / 4.0
            y = y.reshape((x.shape[0], self.channel_in, 4, x.shape[2] // 2, x.shape[3] // 2))
            y = self.transpose(y, perm)
            return y.reshape((x.shape[0], self.channel_in * 4, x.shape[2] // 2, x.shape[3] // 2))
        # Reverse: undo the channel reordering, then invert with the
        # transposed grouped convolution.
        y = x.reshape((x.shape[0], 4, self.channel_in, x.shape[2], x.shape[3]))
        y = self.transpose(y, perm)
        y = y.reshape((x.shape[0], self.channel_in * 4, x.shape[2], x.shape[3]))
        return self.groupConvTranspose(y)


class GroupConv(nn.Cell):
    """
    Grouped 2-D convolution built from per-group ``ops.Conv2D`` primitives.

    The input is split into ``group`` chunks along the channel axis, each
    chunk is convolved with the single shared (non-trainable) ``weights``
    kernel, and the results are concatenated back along the channel axis.

    Args:
        in_channels (int): Input channels of feature map; must be divisible by ``group``.
        out_channels (int): Output channels of feature map; must be divisible by ``group``.
        weights (Tensor): Convolution kernel, shared by every group.
        kernel_size (int): Spatial size of the kernel.
        stride (int): Stride size for the group convolution layer.
        pad_mode (str): Padding mode passed to ``ops.Conv2D``. Default: "pad".
        pad (int): Padding amount. Default: 0.
        group (int): Number of groups. Default: 1.

    Returns:
        Tensor, output tensor.

    Raises:
        ValueError: If ``in_channels`` or ``out_channels`` is not divisible by ``group``.
    """
    def __init__(self, in_channels, out_channels, weights, kernel_size,
                 stride, pad_mode="pad", pad=0, group=1):
        super(GroupConv, self).__init__()
        # `assert` is stripped under `python -O`; validate explicitly instead.
        if in_channels % group != 0 or out_channels % group != 0:
            raise ValueError("in_channels and out_channels must both be divisible by group")
        self.group = group
        self.op_split = ops.Split(axis=1, output_num=self.group)
        self.op_concat = ops.Concat(axis=1)
        self.cast = ops.Cast()
        # Single shared kernel applied to every group.
        self.weights = Parameter(weights, requires_grad=False)
        self.convs = [ops.Conv2D(out_channel=out_channels // group,
                                 kernel_size=kernel_size,
                                 stride=stride, pad=pad,
                                 pad_mode=pad_mode, group=1)
                      for _ in range(group)]

    def construct(self, x):
        """Convolve each channel group independently and concatenate the results."""
        features = self.op_split(x)
        outputs = ()
        for i in range(self.group):
            # Cast keeps the primitive input in float32 regardless of caller dtype.
            outputs = outputs + (self.convs[i](self.cast(features[i], mindspore.float32), self.weights),)
        return self.op_concat(outputs)


class GroupConvTranspose(nn.Cell):
    """
    Grouped transposed 2-D convolution built from per-group ``ops.Conv2DTranspose`` primitives.

    The input is split into ``group`` chunks along the channel axis, each
    chunk is deconvolved with the single shared (non-trainable) ``weights``
    kernel to ``stride``-times the spatial size, and the results are
    concatenated back along the channel axis.

    Args:
        in_channels (int): Input channels of feature map; must be divisible by ``group``.
        out_channels (int): Output channels of feature map; must be divisible by ``group``.
        weights (Tensor): Convolution kernel, shared by every group.
        kernel_size (int): Spatial size of the kernel.
        stride (int): Stride size for the group convolution layer.
        pad_mode (str): Padding mode passed to ``ops.Conv2DTranspose``. Default: "pad".
        pad (int): Padding amount. Default: 0.
        group (int): Number of groups. Default: 1.

    Returns:
        Tensor, output tensor.

    Raises:
        ValueError: If ``in_channels`` or ``out_channels`` is not divisible by ``group``.
    """
    def __init__(self, in_channels, out_channels, weights, kernel_size,
                 stride, pad_mode="pad", pad=0, group=1):
        super(GroupConvTranspose, self).__init__()
        # `assert` is stripped under `python -O`; validate explicitly instead.
        if in_channels % group != 0 or out_channels % group != 0:
            raise ValueError("in_channels and out_channels must both be divisible by group")
        self.group = group
        self.op_split = ops.Split(axis=1, output_num=self.group)
        self.op_concat = ops.Concat(axis=1)
        self.cast = ops.Cast()
        # Single shared kernel applied to every group.
        self.weights = Parameter(weights, requires_grad=False)
        self.stride = stride
        self.convs = [ops.Conv2DTranspose(out_channel=out_channels // group,
                                          kernel_size=kernel_size,
                                          stride=stride, pad_mode=pad_mode,
                                          pad=pad, group=1)
                      for _ in range(group)]

    def construct(self, x):
        """Deconvolve each channel group independently and concatenate the results."""
        features = self.op_split(x)
        outputs = ()
        for i in range(self.group):
            # Conv2DTranspose requires the target output shape explicitly:
            # one channel per group, spatial dims upscaled by `stride`.
            out_shape = (features[i].shape[0], 1,
                         features[i].shape[2] * self.stride,
                         features[i].shape[3] * self.stride)
            outputs = outputs + (self.convs[i](self.cast(features[i], mindspore.float32), self.weights, out_shape),)
        return self.op_concat(outputs)


class InvRescaleNet(nn.Cell):
    """
    Invertible rescaling network: a stack of Haar downsampling steps, each
    followed by ``block_num[i]`` invertible coupling blocks.

    Args:
        channel_in (int): Channels of the network input. Default: 3.
        channel_out (int): Channel split size handed to each InvBlockExp. Default: 3.
        subnet_constructor (callable): Factory ``(in_ch, out_ch) -> nn.Cell`` for the
            coupling sub-networks. Default: None.
        block_num (list[int]): Number of coupling blocks after each downsampling
            step; length must be at least ``down_num``. Default: None, meaning
            zero coupling blocks per scale.
        down_num (int): Number of Haar downsampling steps. Default: 2.
    """
    def __init__(self, channel_in=3, channel_out=3, subnet_constructor=None, block_num=None, down_num=2):
        super(InvRescaleNet, self).__init__()
        # Bug fix: the previous default (`block_num = []`) made `block_num[i]`
        # raise IndexError whenever down_num > 0. Default to zero coupling
        # blocks per scale instead; explicit callers are unaffected.
        if block_num is None:
            block_num = [0] * down_num
        operations = []
        current_channel = channel_in
        for i in range(down_num):
            # Each Haar step quadruples the channel count (2x spatial downsample).
            operations.append(HaarDownsampling(current_channel))
            current_channel *= 4
            for _ in range(block_num[i]):
                operations.append(InvBlockExp(subnet_constructor, current_channel, channel_out))
        self.operations = nn.CellList(operations)

    def construct(self, x, rev=False):
        """Run the stack forward (rev=False) or in reverse order (rev=True)."""
        out = x
        if not rev:
            for i in range(len(self.operations)):
                out = self.operations[i](out, rev)
        else:
            # Inverse: apply each operation's inverse in reverse order.
            for i in range(len(self.operations) - 1, -1, -1):
                out = self.operations[i](out, rev)
        return out


class TrainOneStepInvertibleRescalingNet(nn.Cell):
    """
    One training step for the invertible rescaling network.

    Wraps network ``G`` and an optimizer: each call computes the loss,
    backpropagates with a fixed sensitivity ``sens``, clips every gradient,
    reduces gradients across devices when running data/hybrid parallel, and
    applies the optimizer update.

    Args:
        G (nn.Cell): Network whose output is the scalar loss; called as
            ``G(LR_img, HR_img)``.
        optimizer (nn.Optimizer): Optimizer holding the trainable parameters.
        gradient_clipping (float): Clip value passed to ``clip_grad``.
        sens (float): Sensitivity (initial gradient) for backpropagation. Default: 1.0.
    """
    def __init__(self, G, optimizer, gradient_clipping, sens=1.0):
        super(TrainOneStepInvertibleRescalingNet, self).__init__()
        self.gradient_clipping = gradient_clipping
        self.G = G
        self.optimizer = optimizer
        # Gradients are taken w.r.t. the optimizer's parameter list.
        self.weights = optimizer.parameters
        self.G.set_grad()
        self.G.set_train()
        # sens_param=True: the initial output gradient is supplied explicitly.
        self.grad = ops.GradOperation(get_by_list=True, sens_param=True)
        self.reducer_flag = False
        self.grad_reducer = None
        self.sens = sens
        self.hyper_map = ops.HyperMap()
        self.parallel_mode = context.get_auto_parallel_context("parallel_mode")
        # Cross-device gradient reduction is only needed in these parallel modes.
        if self.parallel_mode in [ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL]:
            self.reducer_flag = True
        if self.reducer_flag:
            mean = context.get_auto_parallel_context("gradients_mean")
            # Prefer the explicitly configured device count; otherwise fall
            # back to the communication group size.
            if auto_parallel_context().get_device_num_is_set():
                degree = context.get_auto_parallel_context("device_num")
            else:
                degree = get_group_size()
            self.grad_reducer = nn.DistributedGradReducer(optimizer.parameters, mean, degree)

    def construct(self, LR_img, HR_img):
        """Run one optimization step; returns the loss of this step."""
        weights = self.weights
        lg = self.G(LR_img, HR_img)
        # Constant sensitivity tensor with the loss's dtype/shape.
        sens_g = ops.Fill()(ops.DType()(lg), ops.Shape()(lg), self.sens)
        grads_g = self.grad(self.G, weights)(LR_img, HR_img, sens_g)
        # gradient clipping (clip_type=1: clip by norm; see clip_grad below)
        grads_g = self.hyper_map(ops.partial(clip_grad, 1, self.gradient_clipping), grads_g)
        if self.reducer_flag:
            # apply grad reducer on grads
            grads_g = self.grad_reducer(grads_g)
        # depend ensures the optimizer update runs before the loss is returned.
        return ops.depend(lg, self.optimizer(grads_g))


# Gradient-clipping dispatcher: a MultitypeFuncGraph that selects an
# implementation by argument types; handlers are registered below via
# @clip_grad.register and applied per-gradient through ops.HyperMap.
clip_grad = C.MultitypeFuncGraph("clip_grad")


@clip_grad.register("Number", "Number", "Tensor")
def _clip_grad(clip_type, clip_value, grad):
    """
    Clip a single gradient tensor.

    Inputs:
        clip_type (int): The way to clip, 0 for 'value', 1 for 'norm';
            any other value leaves the gradient untouched.
        clip_value (float): Specifies how much to clip.
        grad (Tensor): Gradient to clip.

    Outputs:
        Tensor, the clipped gradient.
    """
    if clip_type not in (0, 1):
        return grad
    # Upper bound as a float32 tensor, shared by both clipping modes.
    upper = F.cast(F.tuple_to_array((clip_value,)), ms.float32)
    if clip_type == 0:
        # Element-wise clip into [-clip_value, clip_value].
        lower = F.cast(F.tuple_to_array((-clip_value,)), ms.float32)
        return C.clip_by_value(grad, lower, upper)
    # Rescale so the gradient's L2 norm does not exceed clip_value.
    return nn.ClipByNorm()(grad, upper)