from mindspore import nn, ParameterTuple
import mindspore.ops as ops
import numpy as np
from network.vgg16 import VGG16,VGG16_bn
from mindspore.ops import ROIAlign
from layers import GCN
import mindspore
from layers import KnnGraph
from layers import Graph_RPN

from mindspore.ops import operations as P
from mindspore.ops import composite as C


class UpBlok(nn.Cell):
    """Merge an upsampled feature map with a lateral shortcut, then upsample x2.

    Pipeline: channel concat -> 1x1 conv -> ReLU -> 3x3 conv -> ReLU ->
    4x4 stride-2 transposed conv (doubles the spatial resolution).
    """

    def __init__(self, in_channels, out_channels):
        super().__init__()
        # Attribute names are kept stable so existing checkpoints still load.
        self.conv1x1 = nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0, pad_mode='pad')
        self.conv3x3 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1, pad_mode='pad')
        self.deconv = nn.Conv2dTranspose(out_channels, out_channels, kernel_size=4, stride=2, padding=1, pad_mode='pad')
        self.relu = nn.ReLU()

    def construct(self, upsampled, shortcut):
        # Fuse the two inputs along the channel axis, mix, then upsample.
        fused = ops.concat((upsampled, shortcut), 1)
        fused = self.relu(self.conv1x1(fused))
        fused = self.relu(self.conv3x3(fused))
        return self.deconv(fused)


class FPN(nn.Cell):
    """Feature pyramid decoder on a VGG16 backbone.

    Takes the five VGG stages C1..C5 and merges them top-down with UpBlok,
    producing feature maps up1..up5 at increasing resolution (up1 is the
    finest, 32 channels).

    Args:
        backbone (str): 'vgg' or 'vgg_bn'.
        is_training (bool): kept for API compatibility; unused here.

    Raises:
        ValueError: if `backbone` is not one of the supported names.
    """

    def __init__(self, backbone='vgg', is_training=True):
        super().__init__()

        self.is_training = is_training
        self.backbone_name = backbone
        self.class_channel = 6
        self.reg_channel = 2
        self.relu = nn.ReLU()

        if backbone == 'vgg_bn':
            self.backbone = VGG16_bn()
        elif backbone == 'vgg':
            self.backbone = VGG16()
        else:
            # Fail fast: the original only printed a warning and then crashed
            # later in construct() with a missing-attribute error.
            raise ValueError(
                "backbone '{}' is not supported; expected 'vgg' or 'vgg_bn'".format(backbone))

        self.deconv5 = nn.Conv2dTranspose(512, 256, kernel_size=4, stride=2, padding=1, pad_mode='pad')
        self.merge4 = UpBlok(512 + 256, 128)
        self.merge3 = UpBlok(256 + 128, 64)
        self.merge2 = UpBlok(128 + 64, 32)
        self.merge1 = UpBlok(64 + 32, 32)

    def construct(self, x):
        """Return (up1, up2, up3, up4, up5), finest first."""
        C1, C2, C3, C4, C5 = self.backbone(x)
        up5 = self.deconv5(C5)
        up5 = self.relu(up5)

        up4 = self.merge4(C4, up5)
        up4 = self.relu(up4)

        up3 = self.merge3(C3, up4)
        up3 = self.relu(up3)

        up2 = self.merge2(C2, up3)
        up2 = self.relu(up2)

        # No ReLU on the final map: the prediction head consumes raw features.
        up1 = self.merge1(C1, up2)

        return up1, up2, up3, up4, up5


class TextNet(nn.Cell):
    """Text detector: FPN backbone + 8-channel pixel head + GCN linkage branch.

    The 8 prediction channels are consumed downstream as: text-region (2),
    text-center-line (2), sin (1), cos (1), top radius (1), bottom radius (1).
    """

    def __init__(self, backbone='vgg', is_training=True, args=None):
        super().__init__()

        self.device_target = args.device_target

        self.k_at_hop = [8, 4]
        self.post_dim = 120
        self.active_connection = 3
        self.is_training = is_training
        self.backbone_name = backbone
        self.fpn = FPN(self.backbone_name)
        self.gcn_model = GCN(600, 32)  # 600 = 480 + 120
        self.pooling = ROIAlign(3, 4, spatial_scale=1.0 / 1, sample_num=1)  # (32+8)*3*4 = 480

        # ## class and regression branch
        self.out_channel = 8
        conv = nn.Conv2d(32, self.out_channel, kernel_size=1, stride=1, padding=0, pad_mode='pad')

        self.predict = nn.SequentialCell(
            [conv]
        )

        # ## gcn branch: GT-driven KNN graphs while training, RPN-style
        # proposals at inference time.
        if is_training:
            self.graph = KnnGraph(self.k_at_hop, self.active_connection, self.pooling, 120, self.is_training)  # feats, gt_data
        else:
            self.graph = Graph_RPN(self.pooling, 120)  # image, output, graph_feat

    def load_model(self, model_path):
        """Load weights from a MindSpore checkpoint into this network.

        Bug fix: the original called `self.load_param_into_net(...)`, which is
        not a Cell method, and indexed the checkpoint with ['model'], but
        mindspore.load_checkpoint returns a flat parameter dict.
        """
        print('Loading from {}'.format(model_path))
        param_dict = mindspore.load_checkpoint(model_path)
        mindspore.load_param_into_net(self, param_dict)

    def construct(self, x, roi_data=None):
        """Training forward pass.

        Returns:
            tuple: (predict_out, (gcn_pred, gtmat_batch)) — the pixel head
            output plus the GCN predictions and their ground-truth matrix.
        """
        up1, up2, up3, up4, up5 = self.fpn(x)
        predict_out = self.predict(up1)

        # Graph node features = FPN features concatenated with predictions.
        graph_feat = ops.concat((up1, predict_out), 1)
        feat_batch, adj_batch, h1id_batch, gtmat_batch = self.graph(graph_feat, roi_data)
        gcn_pred = self.gcn_model(feat_batch, adj_batch, h1id_batch)

        return predict_out, (gcn_pred, gtmat_batch)

    def forward_test(self, img):
        """Inference pass for the pixel head only (no graph branch)."""
        up1, up2, up3, up4, up5 = self.fpn(img)
        predict_out = self.predict(up1)

        return predict_out

    def forward_test_graph(self, img):
        """Inference pass including the graph branch.

        Returns:
            (edges, scores, proposals, output); edges/scores/proposals are
            None when the proposal step finds nothing (flag set).
        """
        up1, up2, up3, up4, up5 = self.fpn(img)
        predict_out = self.predict(up1)

        graph_feat = ops.concat((up1, predict_out), 1)

        flag, datas = self.graph(img, predict_out, graph_feat)
        feat, adj, cid, h1id, node_list, proposals, output = datas
        if flag:

            return None, None, None, output

        # Bug fix: the original mapped (adj, cid, h1id) through .cuda(), a
        # PyTorch leftover; MindSpore places tensors per the context, so no
        # explicit device transfer exists or is needed.
        gcn_pred = self.gcn_model(feat, adj, h1id)

        # MindSpore's functional softmax takes `axis`, not torch's `dim`.
        pred = ops.softmax(gcn_pred, axis=1)

        edges = list()
        scores = list()
        # .astype(...).asnumpy() replaces the torch .long()....cpu().numpy().
        node_list = node_list.astype(mindspore.int64).squeeze().asnumpy()
        # torch's feat.size(0) is not callable on MindSpore tensors.
        bs = feat.shape[0]

        for b in range(bs):
            cidb = int(cid[b].asnumpy())
            nl = node_list[b]
            for j, n in enumerate(h1id[b]):
                n = int(n.asnumpy())
                edges.append([nl[cidb], nl[n]])
                scores.append(float(pred[b * (h1id.shape[1]) + j, 1].asnumpy()))

        edges = np.asarray(edges)
        scores = np.asarray(scores)

        return edges, scores, proposals, output


class TextNetWithLossCell(nn.Cell):
    """Wraps TextNet with the combined TextSnake + GCN training loss."""

    def __init__(self, network):
        super(TextNetWithLossCell, self).__init__()
        self.textNet = network

    def ohem(self, predict, target, train_mask, negative_ratio=3.):
        """Online-hard-example-mined cross entropy over masked pixels.

        Bug fix: the original signature was missing `self` even though the
        method is invoked as self.ohem(...), which shifted every argument by
        one (predict received self, etc.).

        Args:
            predict: (N, 2) class logits per pixel.
            target: (N,) int labels.
            train_mask: (N,) int mask of pixels eligible for training.
            negative_ratio: max negatives kept per positive.
        """
        pos = target * train_mask
        neg = (1 - target) * train_mask
        n_pos = pos.astype(float).sum()

        if n_pos.item(0) > 0:
            loss_pos = ops.cross_entropy(predict[pos], target[pos], reduction='sum')
            loss_neg = ops.cross_entropy(predict[neg], target[neg], reduction='none')
            n_neg = min(int(neg.sum()), int(negative_ratio * n_pos))
        else:
            loss_pos = mindspore.tensor(0.)
            loss_neg = ops.cross_entropy(predict[neg], target[neg], reduction='none')
            # NOTE(review): assumes at least 100 negative pixels exist when
            # there are no positives — top_k would fail otherwise; confirm.
            n_neg = 100
        # Keep only the hardest (largest-loss) negatives.
        loss_neg, _ = ops.top_k(loss_neg, n_neg)

        return (loss_pos + loss_neg.sum()) / (n_pos + n_neg)

    def smooth_l1_loss(self, inputs, target, sigma=9.0):
        """Smooth-L1 loss with transition point 1/sigma.

        Bug fix: the original signature was missing `self` even though the
        method is invoked as self.smooth_l1_loss(...).
        """
        diff = ops.Abs()(inputs - target)
        less_one = (diff < 1.0 / sigma)
        # Quadratic below 1/sigma, linear above (standard smooth-L1 split).
        loss = less_one * 0.5 * diff ** 2 * sigma \
                + ops.Abs()(mindspore.Tensor(1.0) - less_one) * (diff - 0.5 / sigma)
        loss = ops.ReduceMean(keep_dims=False)(loss) if ops.size(loss) > 0 else mindspore.Tensor(0.0)

        return loss

    def gcn_loss(self, gcn_data):
        """Cross entropy between GCN link predictions and the GT matrix."""
        gcn_pred = gcn_data[0]
        labels = gcn_data[1].view(-1).astype(mindspore.int64)
        loss = ops.cross_entropy(gcn_pred, labels)

        return loss

    def textLoss(self, inputs, gcn_data, train_mask, tr_mask, tcl_mask, radii_map, sin_map, cos_map):
        """
        calculate textsnake loss
        :param inputs: (Variable), network predict, (BS, 8, H, W)!
        :param gcn_data: (Variable), (gcn_pred ,gtmat_batch)!
        :param tr_mask: (Variable), TR target, (BS, H, W)!
        :param tcl_mask: (Variable), TCL target, (BS, H, W)
        :param sin_map: (Variable), sin target, (BS, H, W)!
        :param cos_map: (Variable), cos target, (BS, H, W)!
        :param radii_map: (Variable), radius target, (BS, H, W)
        :param train_mask: (Variable), training mask, (BS, H, W)!
        :return: loss_tr, loss_tcl, loss_sin, loss_cos, loss_radii, gcn_loss
        """

        tr_pred = inputs[:, :2].transpose(0, 2, 3, 1).view(-1, 2)  # (BSxHxW, 2)
        tcl_pred = inputs[:, 2:4].transpose(0, 2, 3, 1).view(-1, 2)  # (BSxHxW, 2)
        sin_pred = inputs[:, 4].view(-1)  # (BSxHxW,)
        cos_pred = inputs[:, 5].view(-1)  # (BSxHxW,)

        # regularize sin and cos: scale so that sin^2 + cos^2 ~= 1
        scale = ops.Sqrt()(1.0 / (sin_pred ** 2 + cos_pred ** 2 + 0.0001))
        sin_pred = sin_pred * scale
        cos_pred = cos_pred * scale

        top_pred = inputs[:, 6].view(-1)  # (BSxHxW,)
        bot_pred = inputs[:, 7].view(-1)  # (BSxHxW,)

        train_mask = train_mask.view(-1).astype(mindspore.int32)  # (BSxHxW,)

        tr_mask = tr_mask.view(-1).astype(mindspore.int32)
        tcl_mask = tcl_mask[:, :, :, 0].view(-1).astype(mindspore.int32)
        sin_map = sin_map.view(-1)
        cos_map = cos_map.view(-1)
        top_map = radii_map[:, :, :, 0].view(-1)
        bot_map = radii_map[:, :, :, 1].view(-1)

        # Text-region loss with hard negative mining.
        loss_tr = self.ohem(tr_pred, tr_mask, train_mask)

        loss_tcl = mindspore.Tensor(0.)

        # TCL loss is only computed over trainable text-region pixels.
        tr_train_mask = train_mask * tr_mask
        tr_neg_mask = 1 - tr_train_mask
        if tr_train_mask.sum() > 0:
            loss_tcl_pos = ops.cross_entropy(tcl_pred[tr_train_mask], tcl_mask[tr_train_mask])
            loss_tcl_neg = ops.cross_entropy(tcl_pred[tr_neg_mask], tcl_mask[tr_neg_mask])
            loss_tcl = loss_tcl_pos  # negative term deliberately excluded

        # geometry losses, computed only on center-line pixels
        loss_radii = mindspore.Tensor(0.)
        loss_sin = mindspore.Tensor(0.)
        loss_cos = mindspore.Tensor(0.)
        tcl_train_mask = train_mask * tcl_mask
        if tcl_train_mask.sum() > 0:
            ones = ops.OnesLike()(top_pred[tcl_mask])
            sml1_loss = nn.SmoothL1Loss(reduction='none')

            # Radii are regressed as ratios to the ground-truth radius
            # (+0.01 guards against division by zero).
            loss_top = sml1_loss(top_pred[tcl_mask] / (top_map[tcl_mask] + 0.01), ones)
            loss_bot = sml1_loss(bot_pred[tcl_mask] / (bot_map[tcl_mask] + 0.01), ones)

            rad_map = top_map[tcl_mask] + bot_map[tcl_mask]
            loss_radii = ops.ReduceMean(keep_dims=False)(loss_top + loss_bot)

            loss_sin = self.smooth_l1_loss(sin_pred[tcl_mask], sin_map[tcl_mask])
            loss_cos = self.smooth_l1_loss(cos_pred[tcl_mask], cos_map[tcl_mask])

        # ## Graph convolution loss
        gcn_loss = self.gcn_loss(gcn_data)

        return loss_tr, loss_tcl, loss_sin, loss_cos, loss_radii, gcn_loss

    def construct(self, img, train_mask, tr_mask, tcl_mask, radius_map, sin_map, cos_map, gt_roi):
        """Forward TextNet and reduce all loss terms to a single scalar."""
        output, gcn_data = self.textNet(img, gt_roi)
        tr_loss, tcl_loss, sin_loss, cos_loss, radii_loss, gcn_loss \
            = self.textLoss(output, gcn_data, train_mask, tr_mask, tcl_mask, radius_map, sin_map, cos_map)
        loss = tr_loss + tcl_loss + sin_loss + cos_loss + radii_loss + gcn_loss
        return loss



class TrainStepWrap(nn.Cell):
    """One training step: forward, backward, optimizer update.

    Args:
        network: loss cell whose construct() returns a scalar loss.
        optimizer: MindSpore optimizer applied to the gradients. Defaults to
            SGD over the network's trainable parameters for backward
            compatibility with single-argument callers.
    """

    def __init__(self, network, optimizer=None):
        super(TrainStepWrap, self).__init__()
        self.network = network
        self.network.set_train()
        self.weights = ParameterTuple(network.trainable_params())
        # Bug fix: the original never assigned self.optimizer, so construct()
        # crashed with AttributeError on the first training step.
        if optimizer is None:
            optimizer = nn.SGD(self.weights, learning_rate=0.01)
        self.optimizer = optimizer
        self.grad = C.GradOperation(get_by_list=True, sens_param=True)
        self.sens = 1.0

    def construct(self, img, train_mask, tr_mask, tcl_mask, radius_map, sin_map, cos_map, gt_roi):
        """Run one step and return the (pre-update) loss."""
        weights = self.weights
        loss = self.network(img, train_mask, tr_mask, tcl_mask, radius_map, sin_map, cos_map, gt_roi)
        # Gradient sensitivity seeded with a ones-like tensor scaled by sens.
        sens = P.Fill()(P.DType()(loss), P.Shape()(loss), self.sens)
        grads = self.grad(self.network, weights)(img, train_mask, tr_mask, tcl_mask, radius_map, sin_map, cos_map, gt_roi, sens)
        self.optimizer(grads)
        return loss


def get_text_net(args):
    """Assemble the full training stack for the text detection model.

    Args:
        args: namespace with at least `device_target` (consumed by TextNet).

    Returns:
        tuple: (loss_net, train_net) — the loss-wrapped network and its
        one-step training wrapper.
    """
    backbone_net = TextNet(args=args)
    net_with_loss = TextNetWithLossCell(backbone_net)
    train_step = TrainStepWrap(net_with_loss)
    return net_with_loss, train_step
