import mindspore as ms
from mindspore import nn, ops
from mindspore.common import initializer as init
import numpy as np

class ClassLoss(nn.Cell):
    """Face-classification loss: softmax cross-entropy with online hard
    example mining (only the ``keep_num`` largest per-sample losses are
    averaged).

    Valid labels are neg (0) and pos (1); part (-1) and landmark (-2)
    samples are remapped to the sentinel class 100 so they are excluded
    from the classification objective.
    # NOTE(review): relies on TopK selecting real-sample losses; confirm
    # that the sentinel class yields negligible loss for this backbone.
    """

    def __init__(self):
        super(ClassLoss, self).__init__()
        # Per-sample (unreduced) cross-entropy so samples can be ranked.
        self.entropy_loss = nn.SoftmaxCrossEntropyWithLogits(
            sparse=True, reduction='none')
        self.oneslike = ops.OnesLike()
        self.zeroslike = ops.ZerosLike()
        self.reducesum = ops.ReduceSum(keep_dims=False)
        self.topk = ops.TopK(sorted=True)
        self.squeeze = ops.Squeeze()
        self.keep_num = 150      # hard examples kept per batch
        self.keep_ratio = 0.7    # intended keep fraction (fixed keep_num used instead)
        self.reducemean = ops.ReduceMean()

    def construct(self, class_out, label):
        # Keep neg (0) and pos (1); remap part (-1) and landmark (-2)
        # to the out-of-range sentinel class 100.
        label = label.astype(ms.int32)
        label = ms.numpy.where(
            label < 0, 100 * self.oneslike(label), label)  # (batchsize, 1)
        label = self.squeeze(label)  # -> (batchsize,); dtype is already int32
        # Per-sample cross-entropy loss.
        loss = self.entropy_loss(logits=class_out, labels=label)
        # Online hard example mining: keep the keep_num largest losses
        # (TopK operates on the last axis).
        loss, _ = self.topk(loss, self.keep_num)
        return self.reducemean(loss)

# class BBoxLoss(nn.Cell):
#     def __init__(self):
#         super(BBoxLoss, self).__init__()
#         self.square_loss = nn.MSELoss(reduction='none')
#         self.msabs = ops.Abs()
#         self.oneslike = ops.OnesLike()
#         self.zeroslike = ops.ZerosLike()
#         self.squeeze = ops.Squeeze()
#         self.reducesum1 = ops.ReduceSum(keep_dims=True)
#         self.reducesum2 = ops.ReduceSum(keep_dims=False)
#         self.topk = ops.TopK(sorted=True)
#         self.reducemean = ops.ReduceMean()
#         self.keep_num = 100
#
#     def construct(self, bbox_out, bbox_target, label):
#         label = label.astype(ms.int32)
#         # 保留pos 1 和part -1 的数据
#         valid_label = ms.numpy.where(self.msabs(label) == 1, self.oneslike(
#             label), self.zeroslike(label))  # (batchsize, 1)
#         valid_label = self.squeeze(valid_label)
#         # 获取有效值的总数
#         valid_label = valid_label.astype(ms.float32)
#
#         loss = self.square_loss(
#             logits=bbox_out, labels=bbox_target)  # (batchsize, 4)
#
#         loss = self.reducesum2(loss, 1)  # (batchsize, )
#         loss = loss * valid_label
#         # 取有效数据计算损失
#         loss, _ = self.topk(loss, self.keep_num)
#         return self.reducemean(loss)
class BBoxLoss(nn.Cell):
    """Bounding-box regression loss: per-sample squared error, masked to
    valid samples, with online hard example mining over the ``keep_num``
    largest losses.

    Only pos (label 1) and part (label -1) samples carry a bbox target;
    all other samples are zeroed out via a 0/1 validity mask.
    """

    def __init__(self):
        super(BBoxLoss, self).__init__()
        self.square_loss = nn.MSELoss(reduction='none')
        self.msabs = ops.Abs()
        self.oneslike = ops.OnesLike()
        self.zeroslike = ops.ZerosLike()
        self.reducesum2 = ops.ReduceSum(keep_dims=False)
        self.topk = ops.TopK(sorted=True)
        self.reducemean = ops.ReduceMean()
        self.keep_num = 100  # hard examples kept per batch

    def construct(self, bbox_out, bbox_target, label):
        # Valid samples are pos (1) and part (-1), i.e. |label| == 1.
        label = label.astype(ms.int32)
        valid_label = ms.numpy.where(self.msabs(label) == 1,
                                     self.oneslike(label),
                                     self.zeroslike(label))
        valid_label = ops.squeeze(valid_label)  # -> (batchsize,)
        # Cast the mask to float32 so it multiplies cleanly with the float
        # loss (was int32, inconsistent with LandmarkLoss and mixing dtypes).
        valid_label = valid_label.astype(ms.float32)
        loss = self.square_loss(logits=bbox_out, labels=bbox_target)  # (batchsize, 4)
        loss = self.reducesum2(loss, 1)  # sum over the 4 coords -> (batchsize,)
        loss = loss * valid_label        # zero out invalid samples
        # Hard example mining: keep the keep_num largest losses.
        loss, _ = self.topk(loss, self.keep_num)
        return self.reducemean(loss)


# class LandmarkLoss(nn.Cell):
#     def __init__(self):
#         super(LandmarkLoss, self).__init__()
#         self.square_loss = nn.MSELoss(reduction='none')
#         self.oneslike = ops.OnesLike()
#         self.zeroslike = ops.ZerosLike()
#         self.squeeze = ops.Squeeze()
#         self.reducesum1 = ops.ReduceSum(keep_dims=True)
#         self.reducesum2 = ops.ReduceSum(keep_dims=False)
#         self.topk = ops.TopK(sorted=True)
#         self.reducemean = ops.ReduceMean()
#         self.keep_num = 100
#
#     def construct(self, landmark_out, landmark_target, label):
#         label = label.astype(ms.int32)
#         # 只保留landmark数据 -2
#         valid_label = ms.numpy.where(
#             label == -2, self.oneslike(label), self.zeroslike(label))
#         valid_label = self.squeeze(valid_label)
#         # 获取有效值的总数
#         valid_label = valid_label.astype(ms.float32)
#
#         loss = self.square_loss(logits=landmark_out, labels=landmark_target)
#         loss = self.reducesum2(loss, 1)
#         loss = loss * valid_label
#         # 取有效数据计算损失
#         loss, _ = self.topk(loss, self.keep_num)
#         return self.reducemean(loss)

class LandmarkLoss(nn.Cell):
    """Landmark regression loss: masked per-sample MSE with online hard
    example mining (averages the ``keep_num`` largest masked losses)."""

    def __init__(self):
        super(LandmarkLoss, self).__init__()
        self.square_loss = nn.MSELoss(reduction='none')
        self.reducesum = ops.ReduceSum(keep_dims=False)
        self.squeeze = ops.Squeeze()
        self.oneslike = ops.OnesLike()
        self.zeroslike = ops.ZerosLike()
        self.keep_num = 100  # hard examples kept per batch
        self.topk = ops.TopK(sorted=True)
        self.reducemean = ops.ReduceMean()

    def construct(self, landmark_out, landmark_target, label):
        # Only landmark samples (label == -2) carry landmark targets.
        int_label = label.astype(ms.int32)
        mask = ms.numpy.where(int_label == -2,
                              self.oneslike(int_label),
                              self.zeroslike(int_label))
        mask = self.squeeze(mask)
        mask = mask.astype(ms.float32)  # 0/1 float validity mask
        # Squared error summed over landmark coordinates -> (batchsize,)
        per_sample = self.reducesum(
            self.square_loss(logits=landmark_out, labels=landmark_target), 1)
        # Zero out non-landmark samples, then keep the hardest keep_num.
        masked_loss = per_sample * mask
        top_losses, _ = self.topk(masked_loss, self.keep_num)
        return self.reducemean(top_losses)

class NetWithLossCell(nn.Cell):
    """Bundles a backbone and a multi-task loss into one training cell."""

    def __init__(self, backbone, loss_fn, auto_prefix=True):
        super(NetWithLossCell, self).__init__(auto_prefix=auto_prefix)
        self._backbone = backbone
        self._loss_fn = loss_fn

    def construct(self, img, label, bbox, landmark):
        # Forward pass yields the three task heads; feed them to the
        # combined loss together with their respective targets.
        class_out, bbox_out, landmark_out = self._backbone(img)
        return self._loss_fn(class_out, bbox_out, landmark_out,
                             label, bbox, landmark)

class LossForMultiLabel(nn.Cell):
    """Combined MTCNN loss: weighted sum of classification, bounding-box
    and landmark losses (weights 1.0 / 0.5 / 0.5)."""

    def __init__(self):
        super(LossForMultiLabel, self).__init__()
        self.class_loss = ClassLoss()
        self.bbox_loss = BBoxLoss()
        self.landmark_loss = LandmarkLoss()
        # Bug fix: construct() used self.cast, but it was never defined,
        # raising AttributeError at runtime.
        self.cast = ops.Cast()

    def construct(self, class_out, bbox_out, landmark_out, label, bbox, landmark):
        # Relative weights of the three tasks.
        ratio_cls_loss = 1.0
        ratio_bbox_loss = 0.5
        ratio_landmark_loss = 0.5
        cls_loss = self.class_loss(class_out, label)
        box_loss = self.bbox_loss(bbox_out, bbox, label)
        landmarks_loss = self.landmark_loss(landmark_out, landmark, label)
        total_loss = (ratio_cls_loss * cls_loss
                      + ratio_bbox_loss * box_loss
                      + ratio_landmark_loss * landmarks_loss)
        # Ensure a float32 scalar for the optimizer.
        total_loss = self.cast(total_loss, ms.float32)
        return total_loss

# Training-time accuracy metric.
def accuracy(class_out, label):
    """Classification accuracy over valid (neg=0 / pos=1) samples.

    Args:
        class_out: tensor-like with ``.asnumpy()``, shape (batch, classes).
        label: tensor-like with ``.asnumpy()``; entries < 0 (part/landmark
            samples) are ignored.

    Returns:
        Accuracy in [0, 1]; 0.0 when the batch has no valid samples
        (previously this divided by zero).
    """
    class_out = class_out.asnumpy()
    label = np.squeeze(label.asnumpy())
    # Valid samples are those labelled neg (0) or pos (1).
    picked = np.where(label >= 0)
    valid_label = label[picked]
    valid_class_out = class_out[picked]
    if valid_label.size == 0:
        # Guard: no neg/pos samples in this batch — avoid 0/0.
        return 0.0
    correct = np.sum(np.argmax(valid_class_out, axis=1) == valid_label,
                     dtype='float')
    return correct / valid_label.shape[0]
