'''
Function:
    Implementation of ENCNet
Author:
    Zhenchao Jin
'''
# import torch
# import torch.nn as nn
# import torch.nn.functional as F
import luojianet
import luojianet.nn as nn
import luojianet.ops as ops
from luojianet import Parameter, Tensor
from ..base import BaseSegmentor
from .contextencoding import ContextEncoding
from ...backbones import BuildActivation, BuildNormalization, constructnormcfg


# '''ENCNet'''
# class ENCNet(BaseSegmentor):
#     def __init__(self, cfg, losses_cfg, mode):
#         super(ENCNet, self).__init__(cfg, losses_cfg, mode)
#         self.losses_cfg = losses_cfg
#         align_corners, norm_cfg, act_cfg, head_cfg = self.align_corners, self.norm_cfg, self.act_cfg, cfg['head']
#         # build encoding
#         # --base structures
#         # self.bottleneck = nn.Sequential(
#         #     nn.Conv2d(head_cfg['in_channels_list'][-1], head_cfg['feats_channels'], kernel_size=3, stride=1, padding=1, bias=False),
#         #     BuildNormalization(constructnormcfg(placeholder=head_cfg['feats_channels'], norm_cfg=norm_cfg)),
#         #     BuildActivation(act_cfg),
#         # )
#         self.bottleneck = nn.SequentialCell(
#             nn.Conv2d(head_cfg['in_channels_list'][-1], head_cfg['feats_channels'], kernel_size=3, stride=1, padding=1, pad_mode='pad', has_bias=False),
#             BuildNormalization(constructnormcfg(placeholder=head_cfg['feats_channels'], norm_cfg=norm_cfg)),
#             BuildActivation(act_cfg),
#         )
#         self.enc_module = ContextEncoding(
#             in_channels=head_cfg['feats_channels'],
#             num_codes=head_cfg['num_codes'],
#             norm_cfg=norm_cfg,
#             act_cfg=act_cfg,
#         )
#         # --extra structures
#         extra_cfg = head_cfg['extra']
#         if extra_cfg['add_lateral']:
#             # self.lateral_convs = nn.ModuleList()
#             self.lateral_convs = nn.CellList()
#             for in_channels in head_cfg['in_channels_list'][:-1]:
#                 # self.lateral_convs.append(
#                 #     nn.Conv2d(in_channels, head_cfg['feats_channels'], kernel_size=1, stride=1, padding=0),
#                 #     BuildNormalization(constructnormcfg(placeholder=head_cfg['feats_channels'], norm_cfg=norm_cfg)),
#                 #     BuildActivation(act_cfg),
#                 # )
#                 self.lateral_convs.append(
#                     nn.Conv2d(in_channels, head_cfg['feats_channels'], kernel_size=1, stride=1, padding=0, pad_mode='pad'),
#                     BuildNormalization(constructnormcfg(placeholder=head_cfg['feats_channels'], norm_cfg=norm_cfg)),
#                     BuildActivation(act_cfg),
#                 )
#             # self.fusion = nn.Sequential(
#             #     nn.Conv2d(len(head_cfg['in_channels_list']) * head_cfg['feats_channels'], head_cfg['feats_channels'], kernel_size=3, stride=1, padding=1),
#             #     BuildNormalization(constructnormcfg(placeholder=head_cfg['feats_channels'], norm_cfg=norm_cfg)),
#             #     BuildActivation(act_cfg),
#             # )
#             self.fusion = nn.SequentialCell(
#                 nn.Conv2d(len(head_cfg['in_channels_list']) * head_cfg['feats_channels'], head_cfg['feats_channels'], kernel_size=3, stride=1, padding=1, pad_mode='pad'),
#                 BuildNormalization(constructnormcfg(placeholder=head_cfg['feats_channels'], norm_cfg=norm_cfg)),
#                 BuildActivation(act_cfg),
#             )
#         if extra_cfg['use_se_loss']:
#             # self.se_layer = nn.Linear(head_cfg['feats_channels'], cfg['num_classes'])
#             self.se_layer = nn.Dense(head_cfg['feats_channels'], cfg['num_classes'])
#         # build decoder
#         # self.decoder = nn.Sequential(
#         #     nn.Dropout2d(head_cfg['dropout']),
#         #     nn.Conv2d(head_cfg['feats_channels'], cfg['num_classes'], kernel_size=1, stride=1, padding=0)
#         # )
#         self.decoder = nn.SequentialCell(
#             nn.Dropout2d(head_cfg['dropout']),
#             nn.Conv2d(head_cfg['feats_channels'], cfg['num_classes'], kernel_size=1, stride=1, padding=0, pad_mode='pad')
#         )
#         # build auxiliary decoder
#         self.setauxiliarydecoder(cfg['auxiliary'])
#         # freeze normalization layer if necessary
#         if cfg.get('is_freeze_norm', False): self.freezenormalization()
#         # layer names for training tricks
#         self.layer_names = ['backbone_net', 'bottleneck', 'enc_module', 'decoder', 'lateral_convs', 'fusion', 'se_layer', 'auxiliary_decoder']
#     '''forward'''
#     def forward(self, x, targets=None, losses_cfg=None):
#         # img_size = x.size(2), x.size(3)
#         img_size = x.shape[2], x.shape[3]
#         # feed to backbone network
#         backbone_outputs = self.transforminputs(self.backbone_net(x), selected_indices=self.cfg['backbone'].get('selected_indices'))
#         # feed to context encoding
#         feats = self.bottleneck(backbone_outputs[-1])
#         if hasattr(self, 'lateral_convs'):
#             # lateral_outs = [
#             #     F.interpolate(lateral_conv(backbone_outputs[idx]), size=feats.shape[2:], mode='bilinear', align_corners=self.align_corners) for idx, lateral_conv in enumerate(self.lateral_convs)
#             # ]
#             lateral_outs = [
#                 ops.interpolate(lateral_conv(backbone_outputs[idx]), size=feats.shape[2:], mode='bilinear', align_corners=self.align_corners) for idx, lateral_conv in enumerate(self.lateral_convs)
#             ]
#             # feats = self.fusion(torch.cat([feats, *lateral_outs], dim=1))
#             lateral_outs_cat = ops.cat(lateral_outs, axis=1)
#             # feats = self.fusion(ops.cat([feats, *lateral_outs], axis=1))
#             feats = self.fusion(ops.cat([feats, lateral_outs_cat], axis=1))
#         encode_feats, feats = self.enc_module(feats)
#         predictions_se = None
#         if hasattr(self, 'se_layer'):
#             predictions_se = self.se_layer(encode_feats)
#         # feed to decoder
#         predictions = self.decoder(feats)
#         # forward according to the mode
#         if self.mode == 'TRAIN':
#             outputs_dict = self.forwardtrain(
#                 predictions=predictions,
#                 targets=targets,
#                 backbone_outputs=backbone_outputs,
#                 losses_cfg=self.losses_cfg,
#                 img_size=img_size,
#                 compute_loss=False,
#             )
#             if hasattr(self, 'se_layer'):
#                 outputs_dict.update({'loss_se': predictions_se})
#             # return self.calculatelosses(
#             #     predictions=outputs_dict, 
#             #     targets=targets, 
#             #     losses_cfg=losses_cfg
#             # )
#             loss, losses_log_dict = self.calculatelosses(
#                 predictions=outputs_dict, 
#                 targets=targets, 
#                 losses_cfg=self.losses_cfg
#             )
#             return loss
#         return predictions
#     '''convert to onehot labels'''
#     def onehot(self, labels, num_classes):
#         # batch_size = labels.size(0)
#         batch_size = labels.shape[0]
#         labels_onehot = labels.new_zeros((batch_size, num_classes))
#         for i in range(batch_size):
#             hist = labels[i].float().histc(bins=num_classes, min=0, max=num_classes-1)
#             labels_onehot[i] = hist > 0
#         return labels_onehot

'''ENCNet'''
# https://github.com/Tramac/awesome-semantic-segmentation-pytorch/blob/d37d2a17221d2681ad454958cf06a1065e9b1f7f/core/models/encnet.py#L13

class ENCNet(BaseSegmentor):
    '''Context Encoding Network (ENCNet) segmentor, ported to luojianet.

    Reference (PyTorch):
    https://github.com/Tramac/awesome-semantic-segmentation-pytorch/blob/d37d2a17221d2681ad454958cf06a1065e9b1f7f/core/models/encnet.py#L13
    '''
    def __init__(self, cfg, losses_cfg, mode):
        '''Build the ENCNet head on top of the backbone created by BaseSegmentor.

        Args:
            cfg (dict): segmentor config; reads 'head', 'num_classes', 'auxiliary',
                'backbone' and optionally 'is_freeze_norm'.
            losses_cfg (dict): loss configuration forwarded to calculatelosses.
            mode (str): 'TRAIN' for training, anything else for inference.
        '''
        super(ENCNet, self).__init__(cfg, losses_cfg, mode)
        self.losses_cfg = losses_cfg
        align_corners, norm_cfg, act_cfg, head_cfg = self.align_corners, self.norm_cfg, self.act_cfg, cfg['head']
        # histogram op used by onehot(); pinned to CPU, presumably because the op
        # is unsupported on the accelerator backend -- TODO confirm for target device
        self.histc = ops.Histogram(cfg['num_classes'], min=0.0, max=float(cfg['num_classes'] - 1))
        self.histc.set_device("CPU")
        # build encoding
        # --base structures
        self.bottleneck = nn.SequentialCell(
            nn.Conv2d(head_cfg['in_channels_list'][-1], head_cfg['feats_channels'], kernel_size=3, stride=1, padding=1, pad_mode='pad', has_bias=False),
            BuildNormalization(constructnormcfg(placeholder=head_cfg['feats_channels'], norm_cfg=norm_cfg)),
            BuildActivation(act_cfg),
        )
        self.enc_module = ContextEncoding(
            in_channels=head_cfg['feats_channels'],
            num_codes=head_cfg['num_codes'],
            norm_cfg=norm_cfg,
            act_cfg=act_cfg,
        )
        # --extra structures
        self.extra_cfg = head_cfg['extra']
        if self.extra_cfg['add_lateral']:
            self.lateral_convs = nn.CellList()
            for in_channels in head_cfg['in_channels_list'][:-1]:
                # BUGFIX: CellList.append accepts exactly one cell; the original code
                # passed the conv/norm/act cells as three separate positional arguments,
                # which raises a TypeError at construction time. Wrap the triple in a
                # SequentialCell (mirrors the upstream PyTorch nn.Sequential usage).
                self.lateral_convs.append(nn.SequentialCell(
                    nn.Conv2d(in_channels, head_cfg['feats_channels'], kernel_size=1, stride=1, padding=0, pad_mode='pad'),
                    BuildNormalization(constructnormcfg(placeholder=head_cfg['feats_channels'], norm_cfg=norm_cfg)),
                    BuildActivation(act_cfg),
                ))
            # fuses the bottleneck features with all upsampled lateral features
            self.fusion = nn.SequentialCell(
                nn.Conv2d(len(head_cfg['in_channels_list']) * head_cfg['feats_channels'], head_cfg['feats_channels'], kernel_size=3, stride=1, padding=1, pad_mode='pad'),
                BuildNormalization(constructnormcfg(placeholder=head_cfg['feats_channels'], norm_cfg=norm_cfg)),
                BuildActivation(act_cfg),
            )
        if self.extra_cfg['use_se_loss']:
            # semantic-encoding (SE) classification branch on the encoded features
            self.se_layer = nn.Dense(head_cfg['feats_channels'], cfg['num_classes'])
        # build decoder
        self.decoder = nn.SequentialCell(
            nn.Dropout2d(head_cfg['dropout']),
            nn.Conv2d(head_cfg['feats_channels'], cfg['num_classes'], kernel_size=1, stride=1, padding=0, pad_mode='pad')
        )
        # build auxiliary decoder
        self.setauxiliarydecoder(cfg['auxiliary'])
        # freeze normalization layer if necessary
        if cfg.get('is_freeze_norm', False): self.freezenormalization()
        # layer names for training tricks
        self.layer_names = ['backbone_net', 'bottleneck', 'enc_module', 'decoder', 'lateral_convs', 'fusion', 'se_layer', 'auxiliary_decoder']
    '''forward'''
    def forward(self, x, targets=None, losses_cfg=None):
        '''Run the segmentor.

        Args:
            x (Tensor): input image batch, NCHW.
            targets: ground truth, used only in 'TRAIN' mode.
            losses_cfg: unused -- self.losses_cfg is used instead; kept for
                interface compatibility with the other segmentors.

        Returns:
            the scalar training loss in 'TRAIN' mode, otherwise the per-class
            prediction logits from the decoder.
        '''
        img_size = x.shape[2], x.shape[3]
        # feed to backbone network
        backbone_outputs = self.transforminputs(self.backbone_net(x), selected_indices=self.cfg['backbone'].get('selected_indices'))
        # feed to context encoding
        feats = self.bottleneck(backbone_outputs[-1])
        if self.extra_cfg['add_lateral']:
            # upsample every lateral branch to the bottleneck's spatial size
            lateral_outs = [
                ops.interpolate(lateral_conv(backbone_outputs[idx]), size=feats.shape[2:], mode='bilinear', align_corners=self.align_corners) for idx, lateral_conv in enumerate(self.lateral_convs)
            ]
            # concatenate laterals first, then fuse with the bottleneck features
            lateral_outs_cat = ops.cat(lateral_outs, axis=1)
            feats = self.fusion(ops.cat([feats, lateral_outs_cat], axis=1))
        encode_feats, feats = self.enc_module(feats)
        predictions_se = None
        if self.extra_cfg['use_se_loss']:
            predictions_se = self.se_layer(encode_feats)
        # feed to decoder
        predictions = self.decoder(feats)
        # forward according to the mode
        if self.mode == 'TRAIN':
            outputs_dict = self.forwardtrain(
                predictions=predictions,
                targets=targets,
                backbone_outputs=backbone_outputs,
                losses_cfg=self.losses_cfg,
                img_size=img_size,
                compute_loss=False,
            )
            if self.extra_cfg['use_se_loss']:
                outputs_dict.update({'loss_se': predictions_se})
            loss, losses_log_dict = self.calculatelosses(
                predictions=outputs_dict, 
                targets=targets, 
                losses_cfg=self.losses_cfg
            )
            return loss
        return predictions
    '''convert to onehot labels'''
    def onehot(self, labels, num_classes):
        '''Convert dense label maps to per-image class-presence vectors.

        Args:
            labels (Tensor): integer label maps, shape (batch, ...).
            num_classes (int): number of classes / histogram bins.

        Returns:
            Tensor of shape (batch, num_classes) where entry (i, c) is nonzero
            iff class c appears anywhere in labels[i].
        '''
        batch_size = labels.shape[0]
        labels_onehot = labels.new_zeros((batch_size, num_classes))
        for i in range(batch_size):
            # per-image class histogram; the op runs on CPU (see __init__)
            hist = self.histc(labels[i].float())
            # label bookkeeping only -- keep it out of the computation graph
            hist = ops.stop_gradient(hist)
            labels_onehot[i] = hist > 0
        return labels_onehot