# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
dataset processing.
"""
import math

import mindspore.nn as nn
from mindspore.ops import operations as P
from mindspore.ops import composite as C
from mindspore.ops import functional as F
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.ops import ResizeNearestNeighbor
from mindspore import Tensor, ParameterTuple, Parameter
from mindspore.nn.optim import Adam, AdamWeightDecay

from src.config import config as cfg
from src.vgg import Vgg

# VGG layer configurations keyed by depth variant: integers are the output
# channel counts of successive conv layers, 'M' marks a max-pooling layer.
vgg_cfg = {
    '11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    '13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    '16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    '19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}



def vgg16(num_classes=1000, args=None, phase="train"):
    """
    Build a Vgg16 backbone network.

    Args:
        num_classes (int): Number of output classes. Default: 1000.
        args (namespace): Parameters for network construction; when omitted,
            the cifar configuration from ``src.config`` is used instead.
        phase (str): Either ``train`` or ``test``.

    Returns:
        Cell, the constructed Vgg16 network (batch norm controlled by
        ``args.batch_norm``).

    Examples:
        >>> vgg16(num_classes=1000, args=args)
    """
    if args is None:
        # Fall back to the project's cifar defaults when no args are given.
        from src.config import cifar_cfg
        args = cifar_cfg
    return Vgg(vgg_cfg['16'], num_classes=num_classes, args=args,
               batch_norm=args.batch_norm, phase=phase)


class AdvancedEast(nn.Cell):
    """
    East model.

    AdvancedEAST text-detection network: a VGG16 backbone produces four
    intermediate feature maps (l2..l5) that are merged top-down, U-Net
    style, via nearest-neighbor upsampling + channel concatenation, then
    projected to a 7-channel detection map: 1 inside-score channel,
    2 side-vertex code channels, and 4 side-vertex coordinate channels.
    """
    def __init__(self):
        super(AdvancedEast, self).__init__()
        # VGG16 backbone, loaded from the pretrained checkpoint on disk.
        self.vgg16 = vgg16()
        param_dict = load_checkpoint(cfg.vgg_weights)
        load_param_into_net(self.vgg16, param_dict)

        # Merge stage 1: concat(upsampled l5, l4) -> 1024 input channels.
        self.bn1 = nn.BatchNorm2d(1024, momentum=0.99, eps=1e-3)
        self.conv1 = nn.Conv2d(1024, 128, 1, weight_init='XavierUniform', has_bias=True)
        self.relu1 = nn.ReLU()

        self.bn2 = nn.BatchNorm2d(128, momentum=0.99, eps=1e-3)
        # NOTE(review): unlike conv1/conv3/conv5, no has_bias=True here — confirm intended.
        self.conv2 = nn.Conv2d(128, 128, 3, padding=1, pad_mode='pad', weight_init='XavierUniform')
        self.relu2 = nn.ReLU()

        # Merge stage 2: concat(upsampled stage-1 output, l3) -> 384 channels.
        self.bn3 = nn.BatchNorm2d(384, momentum=0.99, eps=1e-3)
        self.conv3 = nn.Conv2d(384, 64, 1, weight_init='XavierUniform', has_bias=True)
        self.relu3 = nn.ReLU()

        self.bn4 = nn.BatchNorm2d(64, momentum=0.99, eps=1e-3)
        # NOTE(review): no has_bias=True here either — confirm intended.
        self.conv4 = nn.Conv2d(64, 64, 3, padding=1, pad_mode='pad', weight_init='XavierUniform')
        self.relu4 = nn.ReLU()

        # Merge stage 3: concat(upsampled stage-2 output, l2) -> 192 channels.
        self.bn5 = nn.BatchNorm2d(192, momentum=0.99, eps=1e-3)
        self.conv5 = nn.Conv2d(192, 32, 1, weight_init='XavierUniform', has_bias=True)
        self.relu5 = nn.ReLU()

        self.bn6 = nn.BatchNorm2d(32, momentum=0.99, eps=1e-3)
        self.conv6 = nn.Conv2d(32, 32, 3, padding=1, pad_mode='pad', weight_init='XavierUniform', has_bias=True)
        self.relu6 = nn.ReLU()

        self.bn7 = nn.BatchNorm2d(32, momentum=0.99, eps=1e-3)
        self.conv7 = nn.Conv2d(32, 32, 3, padding=1, pad_mode='pad', weight_init='XavierUniform', has_bias=True)
        self.relu7 = nn.ReLU()

        # Fixed upsample target sizes (16/32/64) — presumably assumes a
        # 256x256 input so l5..l2 are at strides 32/16/8/4; TODO confirm.
        self.unpool1 = ResizeNearestNeighbor((16, 16), align_corners=True)
        self.unpool2 = ResizeNearestNeighbor((32, 32), align_corners=True)
        self.unpool3 = ResizeNearestNeighbor((64, 64), align_corners=True)

        # Channel-wise concatenation used both for feature merging and for
        # assembling the final 7-channel output.
        self.cat = P.Concat(axis=1)

        # Output heads: inside score (1ch), side-vertex code (2ch),
        # side-vertex coordinates (4ch).
        self.conv8 = nn.Conv2d(32, 1, 1, weight_init='XavierUniform', has_bias=True)
        self.conv9 = nn.Conv2d(32, 2, 1, weight_init='XavierUniform', has_bias=True)
        self.conv10 = nn.Conv2d(32, 4, 1, weight_init='XavierUniform', has_bias=True)

    def construct(self, x):
        """Run the backbone and the top-down merge; return the 7-channel map."""
        # Four intermediate VGG feature maps, coarsest is l5.
        l2, l3, l4, l5 = self.vgg16(x)
        h = l5

        # Merge stage 1: upsample l5 and fuse with l4.
        g = self.unpool1(h)
        c = self.cat((g, l4))

        c = self.bn1(c)
        c = self.conv1(c)
        c = self.relu1(c)

        h = self.bn2(c)
        h = self.conv2(h)
        h = self.relu2(h)

        # Merge stage 2: upsample and fuse with l3.
        g = self.unpool2(h)
        c = self.cat((g, l3))

        c = self.bn3(c)
        c = self.conv3(c)
        c = self.relu3(c)

        h = self.bn4(c)
        h = self.conv4(h)  # bs 64 w/8 h/8
        h = self.relu4(h)

        # Merge stage 3: upsample and fuse with l2.
        g = self.unpool3(h)  # bs 64 w/4 h/4
        c = self.cat((g, l2))

        c = self.bn5(c)
        c = self.conv5(c)
        c = self.relu5(c)

        h = self.bn6(c)
        h = self.conv6(h)  # bs 32 w/4 h/4
        h = self.relu6(h)

        g = self.bn7(h)
        g = self.conv7(g)  # bs 32 w/4 h/4
        g = self.relu7(g)
        # get output: three heads concatenated into one 7-channel map
        inside_score = self.conv8(g)
        side_v_code = self.conv9(g)
        side_v_coord = self.conv10(g)
        east_detect = self.cat((inside_score, side_v_code, side_v_coord))

        return east_detect

def quad_loss(y_true, y_pred,
              lambda_inside_score_loss=4.0,
              lambda_side_vertex_code_loss=1.0,
              lambda_side_vertex_coord_loss=1.0,
              epsilon=1e-4):
    """
    AdvancedEAST training loss.

    Combines three weighted terms computed from the 7-channel maps:
    a class-balanced cross-entropy on the inside-score channel (channel 0),
    a positive-pixel-masked cross-entropy on the side-vertex code channels
    (channels 1-2), and a normalized smooth-L1 loss on the vertex
    coordinate deltas (channels 3-6).

    Args:
        y_true (Tensor): Ground-truth map, channels-first (batch, 7, h, w).
        y_pred (Tensor): Raw network logits, same layout as y_true.
        lambda_inside_score_loss (float): Weight of the inside-score term.
        lambda_side_vertex_code_loss (float): Weight of the vertex-code term.
        lambda_side_vertex_coord_loss (float): Weight of the coordinate term.
        epsilon (float): Small constant for numerically stable logs/divisions.

    Returns:
        Tensor, scalar total loss.
    """
    # --- inside-score term: class-balanced binary cross-entropy ---
    logits = y_pred[:, :1, :, :]
    labels = y_true[:, :1, :, :]
    # beta re-balances positive vs. negative pixels within the image
    beta = 1 - P.ReduceMean()(labels)
    # first apply sigmoid activation to turn logits into probabilities
    predicts = P.Sigmoid()(logits)
    # +epsilon inside the logs keeps them finite at 0/1 predictions
    inside_score_loss = P.ReduceMean()(
        -1 * (beta * labels * P.Log()(predicts + epsilon) +
              (1 - beta) * (1 - labels) * P.Log()(1 - predicts + epsilon)))
    inside_score_loss *= lambda_inside_score_loss

    # --- side-vertex code term, evaluated only on inside (positive) pixels ---
    vertex_logits = y_pred[:, 1:3, :, :]
    vertex_labels = y_true[:, 1:3, :, :]
    vertex_beta = 1 - (P.ReduceMean()(y_true[:, 1:2, :, :])
                       / (P.ReduceMean()(labels) + epsilon))
    vertex_predicts = P.Sigmoid()(vertex_logits)
    pos = -1 * vertex_beta * vertex_labels * P.Log()(vertex_predicts + epsilon)
    neg = -1 * (1 - vertex_beta) * (1 - vertex_labels) * P.Log()(
        1 - vertex_predicts + epsilon)
    # 1.0 where the pixel lies inside a text region, 0.0 elsewhere
    # (+ 0.0 casts the boolean Equal mask to a numeric tensor)
    positive_weights = P.Equal()(y_true[:, 0, :, :], 1) + 0.0
    side_vertex_code_loss = \
        P.ReduceSum()(P.ReduceSum()(pos + neg, 1) * positive_weights) / (
                P.ReduceSum()(positive_weights) + epsilon)
    side_vertex_code_loss *= lambda_side_vertex_code_loss

    # --- vertex coordinate term, evaluated only on side-vertex pixels ---
    g_hat = y_pred[:, 3:, :, :]
    g_true = y_true[:, 3:, :, :]
    vertex_weights = P.Equal()(y_true[:, 1, :, :], 1) + 0.0
    pixel_wise_smooth_l1norm = smooth_l1_loss(g_hat, g_true, vertex_weights)
    side_vertex_coord_loss = P.ReduceSum()(pixel_wise_smooth_l1norm) / (
            P.ReduceSum()(vertex_weights) + epsilon)
    side_vertex_coord_loss *= lambda_side_vertex_coord_loss
    return inside_score_loss + side_vertex_code_loss + side_vertex_coord_loss


def smooth_l1_loss(prediction_tensor, target_tensor, weights):
    """
    Per-pixel smooth-L1 loss on coordinate deltas.

    The standard smooth-L1 (quadratic below |diff| = 1, linear above) is
    summed over the channel axis, divided by the per-pixel normalizer from
    ``quad_norm`` and masked by ``weights``.
    """
    normalizer = P.Reshape()(quad_norm(target_tensor), weights.shape)
    residual = prediction_tensor - target_tensor
    abs_residual = P.Abs()(residual)
    # Boolean mask selecting the quadratic region of the loss.
    in_quadratic = P.Less()(abs_residual, 1)
    quadratic_part = P.Mul()(0.5 * P.Square()(abs_residual), in_quadratic)
    # Neg(mask - 1.0) == (1 - mask): selects the linear region.
    linear_part = P.Mul()(abs_residual - 0.5, P.Neg()(in_quadratic - 1.0))
    channel_sum = P.ReduceSum()(quadratic_part + linear_part, 1)
    return channel_sum / normalizer * weights


def quad_norm(g_true, epsilon=1e-4):
    """
    Per-pixel normalizer for the quad coordinate channels.

    Interprets the 4 channels of ``g_true`` as two (x, y) offset pairs,
    returns 4x the pairwise distance between them (plus ``epsilon`` to
    avoid division by zero downstream), shaped (batch, h, w).
    """
    shp = g_true.shape
    # (batch, 4, h, w) -> (batch, 2 points, 2 coords, h, w)
    point_pairs = P.Reshape()(g_true, (shp[0], 2, 2, shp[2], shp[3]))
    delta = point_pairs[:, 0:1, :, :, :] - point_pairs[:, 1:2, :, :, :]
    # Euclidean distance over the coordinate axis.
    distance = P.Sqrt()(P.ReduceSum()(P.Square()(delta), 2))
    distance = distance * 4.0 + epsilon
    return P.Reshape()(distance, (shp[0], shp[2], shp[3]))



class EastWithLossCell(nn.Cell):
    """
    Combines the AdvancedEast network with the quad loss.

    Args:
        network (Cell): Detection network producing the 7-channel map.
        config: Unused; kept for interface compatibility.
    """
    def __init__(self, network, config=None):
        super(EastWithLossCell, self).__init__()
        self.East_network = network
        # NOTE(review): unused in construct; kept so instance attributes
        # stay identical — confirm whether it can be dropped.
        self.cat = P.Concat(axis=1)

    def construct(self, image, label):
        """Forward the image and return the scalar quad loss vs. label."""
        prediction = self.East_network(image)
        return quad_loss(label, prediction)


class TrainStepWrap(nn.Cell):
    """
    Training-step wrapper: runs the loss cell, computes gradients with a
    fixed sensitivity, and applies an AdamWeightDecay update.

    Args:
        network (Cell): Loss cell taking (image, label) and returning a scalar.
        steps (int): Unused here; kept for interface compatibility.
        config: Unused here; kept for interface compatibility.
    """
    def __init__(self, network, steps, config=None):
        super(TrainStepWrap, self).__init__()
        self.network = network
        self.network.set_train()
        # All trainable parameters, in optimizer/gradient order.
        self.weights = ParameterTuple(network.trainable_params())
        self.optimizer = AdamWeightDecay(self.weights, learning_rate=cfg.learning_rate
                                         , eps=1e-7, weight_decay=cfg.decay)
        # sens_param=True lets construct feed an explicit gradient scale.
        self.grad = C.GradOperation(get_by_list=True, sens_param=True)
        # NOTE(review): gradients are scaled by 3.0 (loss-scale style);
        # the rationale for this value is not evident here — confirm.
        self.sens = 3.0

    def construct(self, image, label):
        weights = self.weights
        loss = self.network(image, label)
        # Sensitivity tensor matching the loss dtype/shape, filled with sens.
        sens = P.Fill()(P.DType()(loss), P.Shape()(loss), self.sens)
        grads = self.grad(self.network, weights)(image, label, sens)
        # depend ties the optimizer update into the graph before returning loss.
        return F.depend(loss, self.optimizer(grads))



def get_AdvancedEast_net(configure=None, steps=1, mode=True):
    """
    Build the AdvancedEast loss network and its training wrapper.

    Args:
        configure: Optional config forwarded to the wrappers (unused there).
        steps (int): Forwarded to TrainStepWrap (unused there).
        mode (bool): Unused; kept for interface compatibility.

    Returns:
        tuple(Cell, Cell): (loss_net, train_net).
    """
    AdvancedEast_net = AdvancedEast()
    loss_net = EastWithLossCell(AdvancedEast_net, configure)
    train_net = TrainStepWrap(loss_net, steps, configure)
    return loss_net, train_net




if __name__ == '__main__':
    # Smoke test: build the loss/train networks and show the optimizer epsilon.
    # (Requires cfg.vgg_weights checkpoint to exist.)
    loss_net, train_net = get_AdvancedEast_net()
    print(train_net.optimizer.eps)









