# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import mindspore.nn as nn
import mindspore.ops as ops
from src.networks import Quantization


#########################################################################
#                            Loss
#########################################################################

class ReconstructionLoss(nn.Cell):
    """Pixel-wise reconstruction loss between a prediction and its target.

    Two criteria are supported, selected by ``losstype``:
      - ``'l2'``: per-sample sum of squared errors, averaged over the batch.
      - ``'l1'``: Charbonnier loss (smooth L1 variant),
        ``sqrt(diff^2 + eps)`` summed per sample and averaged over the batch.

    Args:
        losstype (str): ``'l1'`` or ``'l2'``.
        eps (float): smoothing constant for the Charbonnier (``'l1'``) branch.

    Raises:
        ValueError: if ``losstype`` is not ``'l1'`` or ``'l2'``.
    """

    def __init__(self, losstype='l2', eps=1e-6):
        super(ReconstructionLoss, self).__init__()
        # Fail fast on an unsupported criterion. The original code only
        # printed a message inside construct() and returned a constant 0
        # loss, which would let training run while learning nothing.
        if losstype not in ('l1', 'l2'):
            raise ValueError(
                "reconstruction loss type error! expected 'l1' or 'l2', got %r" % (losstype,))
        self.losstype = losstype
        self.eps = eps

        self.mean = ops.ReduceMean()
        self.sum = ops.ReduceSum()
        self.sqrt = ops.Sqrt()

    def construct(self, x, target):
        """Compute the configured loss.

        Inputs are reduced over axes (1, 2, 3) — assumes 4-D (N, C, H, W)
        tensors, mean taken over the batch axis.
        """
        if self.losstype == 'l2':
            return self.mean(self.sum((x - target)**2, (1, 2, 3)))
        if self.losstype == 'l1':
            diff = x - target
            return self.mean(self.sum(self.sqrt(diff * diff + self.eps), (1, 2, 3)))

        # Unreachable: losstype is validated in __init__. Kept so the
        # compiled graph always has a return value on every path.
        return 0


class InvertibleRescalingNetLoss(nn.Cell):
    """Combined training loss for an Invertible Rescaling Network (IRN).

    The total loss is the sum of three weighted terms:
      - ``l_forw_fit``: LR guidance loss — the forward pass's low-resolution
        output should match the reference LR image.
      - ``l_forw_ce``: distribution matching loss — pushes the forward
        latent ``z`` toward a standard normal (mean squared value).
      - ``l_back_rec``: backward reconstruction loss — inverting the network
        from (quantized LR, sampled z) should recover the HR image.

    Args:
        args: configuration namespace providing the criterion names, loss
            weights, gaussian_scale and scale factor.
        net: the invertible network; called forward as ``net(x=...)`` and
            inverse as ``net(x=..., rev=True)``.
    """

    def __init__(self, args, net):
        super(InvertibleRescalingNetLoss, self).__init__()
        self.Reconstruction_forw = ReconstructionLoss(losstype=args.pixel_criterion_forw)
        self.Reconstruction_back = ReconstructionLoss(losstype=args.pixel_criterion_back)
        self.lambda_rec_back = args.lambda_rec_back
        self.lambda_fit_forw = args.lambda_fit_forw
        self.lambda_ce_forw = args.lambda_ce_forw
        # Identity comparison with None (PEP 8): `!= None` can misfire on
        # objects with a custom __eq__ and is non-idiomatic.
        self.gaussian_scale = args.gaussian_scale if args.gaussian_scale is not None else 1
        self.sum = ops.ReduceSum()
        self.standardNormal = ops.StandardNormal()
        # Quantization is used as a fixed (non-trainable) op.
        self.quant = Quantization()
        self.quant.set_train(False)
        self.quant.set_grad(False)
        self.cat = ops.Concat(1)  # concatenate along the channel axis
        self.netG = net
        self.scale = args.scale

    def gaussian_batch(self, dims):
        """Sample a standard-normal tensor of the given shape."""
        return self.standardNormal(dims)

    def loss_forward(self, out, y, z):
        """Forward-pass losses: LR guidance fit and latent distribution match."""
        l_forw_fit = self.lambda_fit_forw * self.Reconstruction_forw(out, y)

        # Mean squared latent value per sample: encourages z ~ N(0, I).
        z = z.reshape([out.shape[0], -1])
        l_forw_ce = self.lambda_ce_forw * self.sum(z ** 2) / z.shape[0]

        return l_forw_fit, l_forw_ce

    def loss_backward(self, x, y):
        """Backward reconstruction loss: invert y and compare its image
        channels (first 3) against the original HR image x."""
        x_samples = self.netG(x=y, rev=True)
        x_samples_image = x_samples[:, :3, :, :]
        l_back_rec = self.lambda_rec_back * self.Reconstruction_back(x, x_samples_image)
        return l_back_rec

    def test(self, ref_L, real_H):
        """Model testing: run forward, quantize the LR output, sample a
        fresh latent, invert, and return the first sample of each image.

        Returns:
            (real_H[0], ref_L[0], LR[0], fake_H_image[0])
        """
        output = self.netG(real_H)
        # Latent shape: the forward output carries C*scale^2 channels, of
        # which the first C are the LR image and the rest are z.
        Lshape = ref_L.shape
        zshape = (Lshape[0], Lshape[1] * (self.scale ** 2) - Lshape[1], Lshape[2], Lshape[3])

        LR = self.quant(output[:, :3, :, :])

        # NOTE(review): hard-codes scale 1 at test time instead of using
        # self.gaussian_scale — looks intentional (unit-variance sampling
        # for evaluation), but confirm against the training config.
        gaussian_scale = 1
        T = gaussian_scale * self.gaussian_batch(zshape)
        y_forw = self.cat((LR, T))

        fake_H = self.netG(x=y_forw, rev=True)
        fake_H_image = fake_H[:, :3, :, :]

        return real_H[0], ref_L[0], LR[0], fake_H_image[0]

    def construct(self, lr, hr):
        """Compute the total training loss for one (lr, hr) batch.

        Terms:
          l_forw_fit -- LR guidance loss
          l_forw_ce  -- distribution matching loss
          l_back_rec -- backward (HR) reconstruction loss
        """
        output = self.netG(x=hr)

        # Split the forward output: first 3 channels are the LR image,
        # the remainder is the latent z.
        zshape = output[:, 3:, :, :].shape

        LR_ref = lr

        l_forw_fit, l_forw_ce = self.loss_forward(output[:, :3, :, :], LR_ref, output[:, 3:, :, :])

        LR = self.quant(output[:, :3, :, :])

        # Rebuild the inverse input from the quantized LR and a freshly
        # sampled latent (scaled by gaussian_scale).
        y_ = self.cat((LR, self.gaussian_scale * self.gaussian_batch(zshape)))

        l_back_rec = self.loss_backward(hr, y_)

        # total loss
        loss = l_back_rec + l_forw_ce + l_forw_fit

        return loss
