# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from torch import Tensor
from .decode_head import BaseDecodeHead
from ..losses import accuracy
from ..utils import resize
from typing import List, Tuple

from mmcv.cnn import ConvModule
from mmseg.registry import MODELS
from mmseg.registry import MODELS
from mmseg.utils import ConfigType, SampleList


@MODELS.register_module()
class MultiOutputHead(BaseDecodeHead):
    """FCN-style decode head with two independent classification branches.

    Derived from `FCNNet <https://arxiv.org/abs/1411.4038>`_. The head runs
    two parallel conv stacks (``lad`` and ``lcx``) over the shared decoder
    input and concatenates the two sets of logits along the channel
    dimension, so :meth:`forward` returns ``2 * out_channels`` channels.

    Args:
        num_convs (int): Number of convs in each branch. Default: 2.
        kernel_size (int): The kernel size for convs in the head. Default: 3.
        concat_input (bool): Whether concat the input and output of convs
            before classification layer. Default: True.
        dilation (int): The dilation rate for convs in the head. Default: 1.
    """

    def __init__(self,
                 num_convs=2,
                 kernel_size=3,
                 concat_input=True,
                 dilation=1,
                 **kwargs):
        assert num_convs >= 0 and dilation > 0 and isinstance(dilation, int)
        self.num_convs = num_convs
        self.concat_input = concat_input
        self.kernel_size = kernel_size
        super().__init__(**kwargs)
        if num_convs == 0:
            assert self.in_channels == self.channels

        # One 1x1 classifier per branch. (The base-class ``conv_seg`` is not
        # used by this head.)
        self.conv_seg_lad = nn.Conv2d(
            self.channels, self.out_channels, kernel_size=1)
        self.conv_seg_lcx = nn.Conv2d(
            self.channels, self.out_channels, kernel_size=1)

        # BUGFIX: the original built a single list of ConvModules and wrapped
        # the very same module instances in ``convs``, ``convs_lad`` and
        # ``convs_lcx``, so the two branches silently shared their weights.
        # Each branch now gets its own independent stack; the never-used
        # ``self.convs`` alias is dropped.
        self.convs_lad = self._make_branch(num_convs, kernel_size, dilation)
        self.convs_lcx = self._make_branch(num_convs, kernel_size, dilation)

        if self.concat_input:
            # Fusion conv applied after concatenating the raw input with a
            # branch's features. NOTE(review): this module is shared by both
            # branches; split it per branch if the branches must be fully
            # decoupled.
            self.conv_cat = ConvModule(
                self.in_channels + self.channels,
                self.channels,
                kernel_size=kernel_size,
                padding=kernel_size // 2,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg)

    def _make_branch(self, num_convs, kernel_size, dilation):
        """Build one branch's conv stack; identity when ``num_convs == 0``."""
        if num_convs == 0:
            return nn.Identity()
        conv_padding = (kernel_size // 2) * dilation
        layers = []
        in_channels = self.in_channels
        for _ in range(num_convs):
            layers.append(
                ConvModule(
                    in_channels,
                    self.channels,
                    kernel_size=kernel_size,
                    padding=conv_padding,
                    dilation=dilation,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg,
                    act_cfg=self.act_cfg))
            # After the first conv the channel count is ``self.channels``.
            in_channels = self.channels
        return nn.Sequential(*layers)

    def cls_seg(self, feat_lad, feat_lcx):
        """Classify each pixel for both branches.

        Args:
            feat_lad (Tensor): Features of the ``lad`` branch.
            feat_lcx (Tensor): Features of the ``lcx`` branch.

        Returns:
            tuple[Tensor, Tensor]: Per-branch logits, each of shape
                (N, out_channels, H, W).
        """
        if self.dropout is not None:
            feat_lad = self.dropout(feat_lad)
            feat_lcx = self.dropout(feat_lcx)
        return self.conv_seg_lad(feat_lad), self.conv_seg_lcx(feat_lcx)

    def loss_by_feat(self, seg_logits: Tensor,
                     batch_data_samples: SampleList) -> dict:
        """Compute the segmentation loss over all label channels.

        Args:
            seg_logits (Tensor): The output from decode head forward function.
                The channel layout is assumed to be consecutive 2-channel
                logit pairs, one pair per label channel (see the ``i * 2``
                slicing below) -- TODO confirm against the dataset.
            batch_data_samples (List[:obj:`SegDataSample`]): The seg
                data samples. It usually includes information such
                as `metainfo` and `gt_sem_seg`.

        Returns:
            dict[str, Tensor]: a dictionary of loss components; losses with
                the same ``loss_name`` are summed over label channels.
        """
        seg_label = self._stack_batch_gt(batch_data_samples)
        loss = dict()

        seg_logits = resize(
            input=seg_logits,
            size=seg_label.shape[2:],
            mode='bilinear',
            align_corners=self.align_corners)

        if self.sampler is not None:
            seg_weight = self.sampler.sample(seg_logits, seg_label)
        else:
            seg_weight = None
        seg_label = seg_label.squeeze(1)

        if not isinstance(self.loss_decode, nn.ModuleList):
            losses_decode = [self.loss_decode]
        else:
            losses_decode = self.loss_decode

        # Accumulate each configured loss over every label channel; channel i
        # consumes the logit pair [i*2, i*2+2). The loss call was previously
        # duplicated in both branches of the if/else; compute it once.
        for i in range(seg_label.shape[1]):
            for loss_decode in losses_decode:
                value = loss_decode(
                    seg_logits[:, i * 2:i * 2 + 2],
                    seg_label[:, i],
                    weight=seg_weight,
                    ignore_index=self.ignore_index)
                if loss_decode.loss_name not in loss:
                    loss[loss_decode.loss_name] = value
                else:
                    loss[loss_decode.loss_name] += value
        return loss

    def _forward_feature(self, inputs):
        """Forward function for feature maps before classifying each pixel
        with ``self.cls_seg``.

        Args:
            inputs (list[Tensor]): List of multi-level img features.

        Returns:
            tuple[Tensor, Tensor]: Per-branch feature maps, each of shape
                (batch_size, self.channels, H, W).
        """
        x = self._transform_inputs(inputs)
        feats_lad = self.convs_lad(x)
        feats_lcx = self.convs_lcx(x)

        if self.concat_input:
            # BUGFIX: the original referenced an undefined name ``feats``
            # here, raising NameError whenever concat_input=True (the
            # default). Fuse each branch's features with the raw input.
            feats_lad = self.conv_cat(torch.cat([x, feats_lad], dim=1))
            feats_lcx = self.conv_cat(torch.cat([x, feats_lcx], dim=1))

        return feats_lad, feats_lcx

    def forward(self, inputs):
        """Forward function.

        Returns:
            Tensor: Concatenated lad/lcx logits of shape
                (N, 2 * out_channels, H, W).
        """
        feat_lad, feat_lcx = self._forward_feature(inputs)
        output_lad, output_lcx = self.cls_seg(feat_lad, feat_lcx)
        return torch.cat([output_lad, output_lcx], dim=1)

    