

import mindspore as ms
from mindspore import nn
from src.ms_model import Model
import numpy as np

class MultiLosses(nn.Cell):
    """Aggregate cross-entropy losses over one or several model outputs.

    Each output is a dict with keys ``'logits'`` and ``'loss_weight'`` and,
    optionally, ``'name'`` (used as the recording key) and ``'nll'`` (when
    present, logits are treated as probabilities and softmax is skipped).
    Per-output losses are stored in ``self.losses`` under ``'<name>_loss'``
    and can be read back via ``last_losses()``.
    """

    def __init__(self, one_hot=True):
        """Build the loss module.

        one_hot: when True, use SoftCrossEntropyLoss (expects one-hot /
            soft labels); otherwise a sparse softmax cross-entropy.
            NOTE(review): the sparse branch is invoked in ``_ce_loss`` with
            the 3-argument SoftCrossEntropyLoss signature, which does not
            match ``nn.SoftmaxCrossEntropyWithLogits`` -- confirm before
            using ``one_hot=False``.
        """
        super().__init__()
        self.ce = SoftCrossEntropyLoss() if one_hot else nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
        self.bce = nn.BCELoss(reduction='mean')
        self.losses = {}

    def last_losses(self):
        """Return the dict of per-output losses from the last forward pass."""
        return self.losses

    def _merge_list(self, all_res):
        """Merge a list of result dicts into one dict.

        Tensor values are concatenated along axis 0; for non-tensor values
        the first item is kept.  Non-list inputs pass through unchanged.
        """
        if not isinstance(all_res, (list, tuple)):
            return all_res

        concat_op = ms.ops.Concat(axis=0)

        def merge(items):
            if isinstance(items[0], ms.Tensor):
                return concat_op(items)
            return items[0]

        return {key: merge([r[key] for r in all_res]) for key in all_res[0].keys()}

    def _ce_loss(self, output, gt_labels, gt_lengths, idx=None, record=True):
        """Weighted cross-entropy loss for a single output dict.

        output:     dict with 'logits' (stacked batch, first dim a multiple
                    of the label batch) and 'loss_weight'; optional 'name'
                    and 'nll'.
        gt_labels:  ground-truth labels for one batch.
        gt_lengths: per-sample valid lengths.
        idx:        unused; kept for caller compatibility.
        record:     when True and the output is named, store the loss in
                    ``self.losses``.
        """
        loss_name = output.get('name')
        pt_logits, weight = output['logits'], output['loss_weight']

        # Logits may stack several iterations of the same batch.
        assert pt_logits.shape[0] % gt_labels.shape[0] == 0
        iter_size = pt_logits.shape[0] // gt_labels.shape[0]

        cast = ms.ops.Cast()
        gt_labels = cast(gt_labels, ms.float32)
        pt_logits = cast(pt_logits, ms.float32)

        if iter_size > 1:
            # Repeat labels/lengths to match the stacked logits.  Done via
            # numpy to sidestep dynamic-shape concat ops in graph mode.
            # (Bug fix: the replication factor was hard-coded to 3; use the
            # actual iter_size so any stacking depth works.)
            gt_labels = np.tile(gt_labels.asnumpy(), (iter_size, 1, 1))
            gt_lengths = np.tile(gt_lengths.asnumpy(), iter_size)

        gt_labels = ms.Tensor(gt_labels)
        gt_lengths = ms.Tensor(gt_lengths).astype(dtype=ms.int64)

        if output.get('nll') is not None:
            # Logits are already probabilities; skip the softmax inside ce.
            loss = self.ce(gt_labels, pt_logits, gt_lengths, softmax=False) * weight
        else:
            loss = self.ce(gt_labels, pt_logits, gt_lengths) * weight

        if record and loss_name is not None:
            self.losses[f'{loss_name}_loss'] = loss

        return loss

    def construct(self, outputs, label, length, label_for_mask):
        """Sum the losses of all outputs with positive loss_weight.

        For a single (non-list) output dict, return its loss without
        recording it.
        """
        if isinstance(outputs, (tuple, list)):
            outputs = [self._merge_list(o) for o in outputs]
            return sum(self._ce_loss(o, label, length, label_for_mask)
                       for o in outputs if o['loss_weight'] > 0.)
        return self._ce_loss(outputs, label, length, label_for_mask, record=False)



class SoftCrossEntropyLoss(nn.Cell):
    """Cross-entropy against soft (one-hot / probability) labels with masking.

    A position contributes to the loss only where its label vector has a
    positive entry (padding rows, being all zeros, are masked out).  The
    ``'mean'`` reduction divides the summed loss by the total number of
    valid positions (sum of ``gt_lengths``), not by the tensor size.
    """

    def __init__(self, reduction="mean"):
        """reduction: 'mean' divides by the total valid length; any other
        value (e.g. 'sum') returns the unnormalized sum."""
        super().__init__()
        self.reduction = reduction

    def construct(self, gt_labels, pt_logits, gt_lengths, softmax=True):
        """Compute the masked soft cross-entropy.

        gt_labels:  per-sample soft/one-hot targets -- assumed (B, T, C);
                    TODO(review) confirm against caller.
        pt_logits:  logits of the same shape (probabilities if softmax=False).
        gt_lengths: per-sample valid lengths, (B,).
        softmax:    when True apply log-softmax to the logits; when False
                    only a plain log (inputs already probabilities).
        """
        log_softmax = nn.LogSoftmax(axis=-1)
        log = ms.ops.Log()
        concat_op = ms.ops.Concat()

        logits_parts = []
        mask_parts = []
        label_parts = []
        total_valid = 0
        for sample_logits, sample_len, sample_labels in zip(pt_logits, gt_lengths, gt_labels):
            total_valid += int(sample_len)
            # Valid positions have a positive entry somewhere in the label
            # vector; padding rows are all zeros and get masked to 0.
            mask_parts.append(sample_labels > 0)
            logits_parts.append(sample_logits)
            label_parts.append(sample_labels)

        # Concatenate per-sample slices along axis 0, flattening the batch.
        flat_logits = concat_op(logits_parts)
        flat_mask = concat_op(mask_parts).astype(ms.float16)
        flat_labels = concat_op(label_parts)

        # NOTE(review): masking is applied BEFORE the softmax, so masked
        # classes still shift the softmax normalization -- this mirrors the
        # original implementation; confirm it is intended.
        flat_logits = flat_logits * flat_mask

        if softmax:
            log_prob = log_softmax(flat_logits.astype(ms.float32))
        else:
            log_prob = log(flat_logits)

        loss = -(flat_labels * log_prob)
        loss = loss.astype(ms.float16) * flat_mask
        # Sum over classes, then over all positions, leaving a scalar.
        loss = loss.sum(axis=-1).sum(axis=-1)

        if self.reduction == "mean":
            return loss / total_valid
        # 'sum' and any other reduction value return the plain sum.
        return loss