#from fastai.vision import *

import mindspore as ms
from mindspore import nn
from ms_model import Model
import numpy as np

class MultiLosses(nn.Cell):
    """Aggregate cross-entropy losses over one or more decoder outputs.

    Each output is a dict with at least ``'logits'`` and ``'loss_weight'``
    keys (optionally ``'name'`` and ``'nll'``).  Per-sequence targets are
    trimmed to their valid lengths, flattened, and fed to a (soft) cross
    entropy; the weighted losses of all outputs are summed.

    Args:
        one_hot: when True, targets are one-hot/soft distributions and
            :class:`SoftCrossEntropyLoss` is used; otherwise targets are
            sparse class indices and MindSpore's built-in sparse softmax
            cross-entropy is used.
    """

    def __init__(self, one_hot=True):
        super().__init__()
        self.ce = SoftCrossEntropyLoss() if one_hot else nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
        self.bce = nn.BCELoss(reduction='mean')
        # Populated by _ce_loss (when record=True); read via last_losses().
        self.losses = {}

    def last_losses(self):
        """Return the per-output losses recorded by the last forward pass."""
        return self.losses

    def _flatten(self, sources, lengths):
        """Trim each row of ``sources`` to its valid length and concatenate.

        Args:
            sources: tensor of shape (batch, max_len, ...).
            lengths: per-row valid lengths (integer tensor of shape (batch,)).

        Returns:
            Tensor of shape (sum(lengths), ...) obtained by concatenating
            ``sources[i][:lengths[i]]`` along axis 0.

        NOTE(review): this produces dynamic shapes; if graph mode rejects it,
        the workaround belongs here, not a hard-coded placeholder tensor.
        """
        concat_op = ms.ops.Concat()
        return concat_op(tuple(t[:l] for t, l in zip(sources, lengths)))

    def _merge_list(self, all_res):
        """Merge a list of result dicts into one dict.

        Tensor values are concatenated along axis 0; non-tensor values are
        taken from the first dict. A non-list input is returned unchanged.
        """
        if not isinstance(all_res, (list, tuple)):
            return all_res

        def merge(items):
            concat_op = ms.ops.Concat(axis=0)
            if isinstance(items[0], ms.Tensor):
                return concat_op(items)
            else:
                return items[0]

        res = dict()
        for key in all_res[0].keys():
            items = [r[key] for r in all_res]
            res[key] = merge(items)
        return res

    def _ce_loss(self, output, gt_labels, gt_lengths, idx=None, record=True):
        """Weighted cross-entropy for one output dict.

        Args:
            output: dict with 'logits', 'loss_weight', optionally 'name'/'nll'.
            gt_labels: ground-truth labels, shape (batch, max_len, ...).
            gt_lengths: per-sample valid lengths, shape (batch,).
            idx: unused placeholder kept for interface compatibility.
            record: when True and the output is named, store the loss in
                ``self.losses`` under '<name>_loss'.

        Returns:
            Scalar loss tensor, scaled by the output's 'loss_weight'.
        """
        loss_name = output.get('name')
        pt_logits, weight = output['logits'], output['loss_weight']

        # Logits may be replicated an integral number of times (e.g. iterative
        # refinement); the ground truth is repeated below to match.
        assert pt_logits.shape[0] % gt_labels.shape[0] == 0

        iter_size = pt_logits.shape[0] // gt_labels.shape[0]
        cast = ms.ops.Cast()
        gt_labels = cast(gt_labels, ms.float32)
        pt_logits = cast(pt_logits, ms.float32)

        if iter_size > 1:
            # Repeat the targets iter_size times along the batch axis so they
            # line up with the replicated logits.  (The factor must be
            # iter_size, not a fixed constant.)
            gt_labels = np.tile(gt_labels.asnumpy(), (iter_size, 1, 1))
            gt_lengths = np.tile(gt_lengths.asnumpy(), iter_size)

        gt_labels = ms.Tensor(gt_labels)
        gt_lengths = ms.Tensor(gt_lengths)
        gt_lengths = gt_lengths.astype(dtype=ms.int64)
        flat_gt_labels = self._flatten(gt_labels, gt_lengths)
        flat_pt_logits = self._flatten(pt_logits, gt_lengths)

        nll = output.get('nll')
        if nll is not None:
            # Logits are already probabilities; skip the softmax inside the loss.
            loss = self.ce(flat_pt_logits, flat_gt_labels, softmax=False) * weight
        else:
            loss = self.ce(flat_pt_logits, flat_gt_labels) * weight
        if record and loss_name is not None:
            self.losses[f'{loss_name}_loss'] = loss

        return loss

    def construct(self, outputs, *args):
        """Sum the weighted losses of all outputs.

        ``outputs`` is either a single output dict or a list of (lists of)
        output dicts; lists are first merged along the batch axis.  Extra
        positional args are forwarded to ``_ce_loss`` (gt_labels, gt_lengths).
        """
        if isinstance(outputs, (tuple, list)):
            outputs = [self._merge_list(o) for o in outputs]
            return sum([self._ce_loss(o, *args) for o in outputs if o['loss_weight'] > 0.])
        else:
            return self._ce_loss(outputs, *args, record=False)


class SoftCrossEntropyLoss(nn.Cell):
    """Cross-entropy with soft (probability-distribution) targets.

    Computes ``-(target * log_prob).sum(-1)`` per sample, where ``log_prob``
    is the log-softmax of the input (or plain log when the input already
    holds probabilities).

    Args:
        reduction: 'mean' (default), 'sum', or anything else for no reduction.
    """

    def __init__(self, reduction="mean"):
        super().__init__()
        self.reduction = reduction
        # Instantiate operator cells once here: creating them inside
        # construct() allocates on every call and breaks graph-mode tracing.
        self.log_softmax = nn.LogSoftmax(axis=-1)
        self.log = ms.ops.Log()

    def construct(self, input, target, softmax=True):
        """Return the soft cross-entropy between ``input`` and ``target``.

        Args:
            input: logits when ``softmax`` is True, probabilities otherwise.
            target: soft target distribution, same shape as ``input``.
            softmax: apply log-softmax to ``input`` (True) or plain log (False).
        """
        if softmax:
            log_prob = self.log_softmax(input)
        else:
            log_prob = self.log(input)
        loss = -(target * log_prob).sum(axis=-1)
        if self.reduction == "mean":
            return loss.mean()
        elif self.reduction == "sum":
            return loss.sum()
        else:
            return loss