
from mindspore.ops import operations as P
import numpy as np
from mindspore import nn
# from src.ms_model import Model
import mindspore.numpy as msnp
import mindspore as ms
class MultiLosses(nn.Cell):
    """Sum of the soft cross-entropy losses of the model's three output
    branches (indices 0, 1 and 2 handled in :meth:`construct`)."""

    def __init__(self, one_hot=True):
        """
        Args:
            one_hot: kept for interface compatibility; only the soft
                (one-hot) cross entropy is implemented in this port.
        """
        super().__init__()
        self.ce = SoftCrossEntropyLoss()
        self.bce = nn.BCELoss(reduction='mean')
        # Record of the last computed losses.  A plain dict is used here:
        # an empty ms.Parameter([]) is not a valid trainable parameter.
        self.losses = {}
        self.cast = P.Cast()

    def last_losses(self):
        """Return the last recorded losses."""
        return self.losses

    def _merge_list(self, all_res):
        """Merge a list/tuple of dicts sharing the same keys into a list of
        values (one entry per key, in key order), concatenating Tensor
        values along axis 0.  Non-list inputs pass through unchanged."""
        if not isinstance(all_res, (list, tuple)):
            return all_res

        def merge(items):
            concat_op = ms.ops.Concat(axis=0)
            if isinstance(items[0], ms.Tensor):
                return concat_op(items)
            # Non-tensor values are assumed identical across iterations;
            # keep the first one.
            return items[0]

        merged = []
        for key in all_res[0].keys():
            # Collect this key from every result dict (previously the loop
            # was hard-coded to exactly 3 entries).
            merged.append(merge([res[key] for res in all_res]))
        return merged

    def _ce_loss(self, output, loss_args, i, idx=None, record=True):
        """Weighted soft cross entropy for branch ``i``.

        Branches 0 and 1 receive ``output`` as the positional list produced
        by :meth:`_merge_list`; branch 2 receives a dict.  ``idx`` and
        ``record`` are kept for interface compatibility and are unused.

        Args:
            output: branch output (list for i in {0, 1}, dict for i == 2).
            loss_args: (gt_labels, gt_lengths, label_for_mask).
            i: branch index in {0, 1, 2}.

        Returns:
            The branch loss scaled by the branch's loss weight.
        """
        # Defaults keep both names defined on every path (graph mode).
        pt_logits = 1.0
        weight = 1.0

        if i == 0:
            # NOTE(review): the index layout differs per branch — assumed
            # to match the key order produced by _merge_list; verify
            # against the model's output dicts.
            pt_logits, weight = output[0], output[2]
        if i == 1:
            pt_logits, weight = output[1], output[3]
        if i == 2:
            pt_logits = output['logits']
            weight = output['loss_weight']

        gt_labels = loss_args[0]
        gt_lengths = loss_args[1]
        label_for_mask = loss_args[2]
        assert pt_logits.shape[0] % gt_labels.shape[0] == 0

        # How many times the logits repeat the ground-truth batch.
        iter_size = pt_logits.shape[0] // gt_labels.shape[0]
        type_dst = ms.float16
        cast = ms.ops.Cast()
        gt_labels = cast(gt_labels, type_dst)
        gt_lengths = cast(gt_lengths, type_dst)
        pt_logits = cast(pt_logits, type_dst)
        label_for_mask = cast(label_for_mask, type_dst)

        if iter_size > 1:
            # Tile the targets to match the repeated logits.  Previously
            # the repetition count was hard-coded to 3; use the computed
            # iter_size so any repetition factor works.
            gt_labels = msnp.tile(gt_labels, (iter_size, 1, 1))
            gt_lengths = msnp.tile(gt_lengths, iter_size)
            label_for_mask = msnp.tile(label_for_mask, (iter_size, 1))

        # Insert a singleton axis so the mask matches the per-sample
        # transpose done inside SoftCrossEntropyLoss.
        label_for_mask = label_for_mask[:, msnp.newaxis]

        loss = self.ce(gt_labels, pt_logits, gt_lengths, label_for_mask) * weight
        return loss

    def construct(self, outputs, loss_args):
        """Merge each branch's raw outputs and sum the three branch losses."""
        output_list = []
        for branch in outputs:
            output_list.append(self._merge_list(branch))
        outputs = output_list

        loss_all = 0
        # Exactly three branches are understood by _ce_loss.
        for i in range(3):
            loss_all = self._ce_loss(outputs[i], loss_args, i) + loss_all
        return loss_all



class SoftCrossEntropyLoss(nn.Cell):
    """Cross entropy against soft (distribution / one-hot) targets, masked
    to each sample's valid sequence length."""

    def __init__(self, reduction="mean"):
        """
        Args:
            reduction: 'mean' divides the summed loss by the total number
                of valid positions; 'sum' (or anything else) returns the
                summed loss.  Previously this argument was silently
                ignored; the default 'mean' behavior is unchanged.
        """
        super().__init__()
        self.reduction = reduction

    def construct(self, gt_labels, pt_logits, gt_lengths, label_for_mask, softmax=True):
        """Compute the masked soft cross entropy.

        Args:
            gt_labels: target distributions, one per sample.
            pt_logits: predicted logits (or probabilities if softmax=False).
            gt_lengths: valid length of each sample.
            label_for_mask: per-sample mask source; positions > 0 are valid.
            softmax: apply log-softmax to the logits; otherwise take a
                plain log (inputs are assumed to already be probabilities).

        Returns:
            Scalar loss reduced according to ``self.reduction``.
        """
        log_softmax = nn.LogSoftmax(axis=-1)
        log = ms.ops.Log()
        concat_op = ms.ops.Concat()

        logits_parts = []
        mask_parts = []
        target_parts = []
        valid_count = 0

        for sample in range(pt_logits.shape[0]):
            # Accumulate the number of valid positions for the mean.
            valid_count = valid_count + gt_lengths[sample]
            # Positions > 0 are valid; transpose so the mask broadcasts
            # over the class dimension of the logits.
            sample_mask = label_for_mask[sample] > 0
            sample_mask = sample_mask.transpose(1, 0)

            logits_parts.append(pt_logits[sample])
            mask_parts.append(sample_mask)
            target_parts.append(gt_labels[sample])

        concat_pt_logits = concat_op(logits_parts)
        concat_mask = concat_op(mask_parts)
        concat_gt_labels = concat_op(target_parts)

        # Zero out invalid positions before the (log-)softmax; the loss is
        # masked again below so those rows contribute nothing.
        concat_mask = concat_mask.astype(ms.float16)
        concat_pt_logits = concat_pt_logits * concat_mask

        if softmax:
            concat_pt_logits = concat_pt_logits.astype(ms.float16)
            log_prob = log_softmax(concat_pt_logits)
        else:
            log_prob = log(concat_pt_logits)

        loss = -(concat_gt_labels * log_prob)
        loss = loss.astype(ms.float16)
        loss = loss * concat_mask

        loss = loss.sum(axis=-1)  # sum over the class dimension
        loss = loss.sum(axis=-1)  # sum over positions (for the mean)

        loss_mean = loss / valid_count

        if self.reduction == "mean":
            return loss_mean
        return loss


        
        














#################### Code pasted over from the GPU version
#################### Runs correctly in PyNative mode

# import mindspore as ms
# from mindspore import nn
# from src.ms_model import Model
# import numpy as np

# class MultiLosses(nn.Cell):
#     def __init__(self, one_hot=True):
#         super().__init__()
#         #self.ce = SoftCrossEntropyLoss() if one_hot else torch.nn.CrossEntropyLoss()
#         self.ce = SoftCrossEntropyLoss() if one_hot else nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
#         #self.bce = torch.nn.BCELoss()
#         self.bce = nn.BCELoss(reduction= 'mean')
#         self.losses = {}
        

#     #@property
#     def last_losses(self):
#         return self.losses

#     def _merge_list(self, all_res):
#         if not isinstance(all_res, (list, tuple)):
#             return all_res
#         def merge(items):
#             concat_op = ms.ops.Concat(axis= 0)
#             if isinstance(items[0], ms.Tensor): return concat_op(items)
#             else: return items[0]
#         res = dict()
#         for key in all_res[0].keys():
#             items = [r[key] for r in all_res]
#             res[key] = merge(items)
#         return res

#     def _ce_loss(self, output, loss_args,idx=None, record=True):
#         loss_name = output.get('name')
#         # print("loss_args:",loss_args)
#         pt_logits, weight,pt_lengths = output['logits'], output['loss_weight'],output['pt_lengths']
#         gt_labels  =loss_args[0]
#         gt_lengths  =loss_args[1]
#         label_for_mask=loss_args[2]
#         assert pt_logits.shape[0] % gt_labels.shape[0] == 0

#         iter_size = pt_logits.shape[0] // gt_labels.shape[0]
#         type_dst = ms.float32
#         cast = ms.ops.Cast()
#         gt_labels = cast(gt_labels, type_dst)
#         #gt_lengths = cast(gt_lengths, type_dst)
#         pt_logits = cast(pt_logits, type_dst)
        
        
#         if iter_size > 1:
#             #d = np.tile(a, (2, 1, 2))
#             gt_labels = gt_labels.asnumpy()
#             gt_lengths = gt_lengths.asnumpy()
#             label_for_mask = label_for_mask.asnumpy()
#             gt_labels = np.tile(gt_labels,(3,1,1))
#             gt_lengths = np.tile(gt_lengths,3)
            
#             label_for_mask = np.tile(label_for_mask,(3,1))
#             # gt_lengths = gt_lengths.repeat(3)
#             # gt_labels = gt_labels.repeat(3, 1, 1)
#         label_for_mask = label_for_mask[:,np.newaxis]
#         gt_labels = ms.Tensor(gt_labels)
#         gt_lengths = ms.Tensor(gt_lengths)
#         label_for_mask = ms.Tensor(label_for_mask)
#         #pt_logits = ms.Tensor(pt_logits)
#         gt_lengths = gt_lengths.astype(dtype = ms.int64)
#         #flat_gt_labels = self._flatten(gt_labels, gt_lengths)
#         #flat_pt_logits = self._flatten(pt_logits, gt_lengths)

#         nll = output.get('nll')
#         if nll is not None:
#             loss = self.ce(gt_labels,pt_logits,gt_lengths,label_for_mask, softmax=False) * weight
#         else:
#             #loss = self.ce(flat_pt_logits, flat_gt_labels) * weight
#             loss = self.ce(gt_labels,pt_logits,gt_lengths,label_for_mask) * weight
#         if record and loss_name is not None: self.losses[f'{loss_name}_loss'] = loss

#         return loss

#     def construct(self,  outputs, loss_args):
        
#         # self.losses = {}
#         if isinstance(outputs, (tuple, list)):
#             # print("111111111111111111111111")
#             outputs = [self._merge_list(o) for o in outputs]
#             loss_one = 0
#             loss_all = 0
#             for i in range(3):
#                 loss_one = self._ce_loss(outputs[i], loss_args)
#                 loss_all = loss_one + loss_all
#             # return sum([self._ce_loss(o, loss_args) for o in outputs if o['loss_weight'] > 0.])
#             return loss_all
#         else:
#             return self._ce_loss(outputs, loss_args, record=False)


# class SoftCrossEntropyLoss(nn.Cell):
#     def __init__(self, reduction="mean"):
#         super().__init__()
#         self.reduction = reduction
        

#     def construct(self, gt_labels,pt_logits,gt_lengths, label_for_mask,softmax=True):


#         log_softmax = nn.LogSoftmax(axis= -1)
#         log = ms.ops.Log()
#         concat_op = ms.ops.Concat()
#         data_pt_list = []
#         mask_list = []
#         gt_list = []
#         # data_pt_list_one = []
#         # mask_list_one = []
#         # gt_list_one = []
#         # data_pt_list_two = []
#         # mask_list_two = []
#         # gt_list_two = []
#         loss = 0
#         mean_divide = 0
#         mask_1_vector = ms.Tensor(np.ones((37)))
#         dataset_pt_zip = zip(pt_logits,gt_lengths,gt_labels,label_for_mask)
        
        
#         # print("before for")
#         # concat_num = 0
#         for s_pt,l_pt,gt,label_for_mask in dataset_pt_zip:
#             # concat_num = concat_num + 1
#             #min_d_pt_vector = np.ones((26,37))
#             #min_d_pt_vector = ms.Tensor(min_d_pt_vector * (-100))
#             #mask_pt = ms.Tensor(np.zeros((26,37))

#             data_length = int(l_pt)
#             #mean_divide = mean_divide + data_length
#             mean_divide = mean_divide + data_length
#             mask_pt = label_for_mask > 0
#             #print(mask_pt)
#             mask_pt = mask_pt.transpose(1,0)
#             # print("s_pt:",s_pt.shape)
#             # print("mask_pt:",mask_pt.shape)
#             # print("gt:",gt.shape)

#             data_pt_list.append(s_pt)
#             mask_list.append(mask_pt)
#             gt_list.append(gt)
        
#         # print("before concat1")
#         # print("len(data_pt_list:",len(data_pt_list))
#         # print("shape:",data_pt_list[0].shape)
#         concat_pt_logits = concat_op(data_pt_list)
#         # print("after concat1")
#         concat_mask = concat_op(mask_list)
        
#         concat_gt_labels = concat_op(gt_list)

        


#         #concat_gt_labels = concat_op([t for t in gt_labels])
#         concat_mask = concat_mask.astype(ms.float16)
        
#         concat_pt_logits = concat_pt_logits * concat_mask

        

#         # concat_mask_two = concat_mask_two.astype(ms.float16)
#         # concat_pt_logits_two = concat_pt_logits_two * concat_mask_two        
#         if softmax: 
#             concat_pt_logits = concat_pt_logits.astype(ms.float32)
#             log_prob = log_softmax(concat_pt_logits)
#         else: 
#             log_prob = log(concat_pt_logits)               
        
#         loss = - (concat_gt_labels * log_prob)
#         loss = loss.astype(ms.float16)
        
#         loss = loss * concat_mask
    
        
#         loss = loss.sum(axis = -1)#本身要加的
#         loss = loss.sum(axis = -1)#为了做mean，取平均值加的。



#         # loss = loss_one + loss_two
#         loss_mean = loss / mean_divide

#         print("loss_mean:",loss_mean)

#         if self.reduction == "mean": return loss_mean
#         elif self.reduction == "sum": return loss
#         else: return loss








#         #以下为带for循环的loss，巡行速度过慢

#         # log_softmax = nn.LogSoftmax(axis= -1)
#         # log = ms.ops.Log()
#         # concat_op = ms.ops.Concat()
#         # data_pt_list = []
#         # mask_list = []
#         # loss = 0
#         # mean_divide = 0
#         # mask_1_vector = ms.Tensor(np.ones((37)))
#         # dataset_pt_zip = zip(pt_logits,gt_lengths)



#         # # print("before for")
#         # for s_pt,l_pt in dataset_pt_zip:
#         #     min_d_pt_vector = np.ones((26,37))
#         #     min_d_pt_vector = ms.Tensor(min_d_pt_vector * (-100))
#         #     mask_pt = ms.Tensor(np.zeros((26,37)))
#         #     data_length = int(l_pt)
#         #     for i in range(data_length):
#         #         mask_pt[i] = mask_1_vector
#         #         min_d_pt_vector[i] = s_pt[i]
#         #         mean_divide = mean_divide + 1
            
#         #     data_pt_list.append(min_d_pt_vector)
#         #     mask_list.append(mask_pt)
#         # # print("after for")
#         # concat_pt_logits = concat_op(data_pt_list)
#         # concat_mask = concat_op(mask_list)
#         # concat_gt_labels = concat_op([t for t in gt_labels])

#         # if softmax: 
#         #     concat_pt_logits = concat_pt_logits.astype(ms.float32)
#         #     log_prob = log_softmax(concat_pt_logits)
#         # else: 
#         #     log_prob = log(concat_pt_logits)         

#         # loss = - (concat_gt_labels * log_prob)
#         # loss = loss.astype(ms.float16)
#         # concat_mask = concat_mask.astype(ms.float16)
#         # loss = loss * concat_mask
#         # loss = loss.sum(axis = -1)#本身要加的
#         # loss = loss.sum(axis = -1)#为了做mean，取平均值加的。
        
#         # loss_mean = loss / mean_divide
        
#         # if self.reduction == "mean": return loss_mean
#         # elif self.reduction == "sum": return loss
#         # else: return loss




#         #以下是固定返回loss值
        
#         # print("loss_after")
#         # loss = ms.Tensor(15)
#         # return loss






