import torch
import torch.nn as nn
import torch.nn.functional as F
from detection.layers import cat
from detection.modeling.neck.fpn import FPN
from detection.modeling.neck.rpn import RPN

from ..layers.grad_reverse import grad_reverse
from .backbone import build_backbone
from .da_head import Dis
from .roi_heads import BoxHead


class SAPNet(nn.Module):
    """Detection network with optional adversarial domain adaptation.

    Builds a backbone (optionally followed by an FPN), an RPN and an RoI box
    head. When target datasets are configured (``cfg.DATASETS.TARGETS`` is
    non-empty), a list of domain discriminators is also built; during
    training they receive gradient-reversed features so the backbone learns
    domain-invariant representations.
    """

    def __init__(self, cfg):
        """Build all sub-modules from the config node *cfg*."""
        super().__init__()
        self.cfg = cfg
        self.out_indices = cfg.MODEL.BACKBONE.out_indices
        has_layer4 = cfg.MODEL.BACKBONE.HAS_LAYER4
        self.build_FPN = cfg.MODEL.FPN.FPN_ON

        self.backbone = build_backbone(cfg)

        if self.build_FPN:
            # Collect the channel count of each backbone stage that feeds
            # the FPN.
            fpn_in_channels = []
            if not has_layer4:
                fpn_in_channels.append(self.backbone.out_channels)
            else:
                # ResNet-style stage widths; keep only the stages selected
                # by out_indices.  NOTE(review): assumes a 4-stage backbone
                # with these exact widths — confirm against build_backbone.
                out_channels = [256, 512, 1024, 2048]
                for idx, is_out in enumerate(self.out_indices):
                    if is_out:
                        fpn_in_channels.append(out_channels[idx])
            self.fpn = FPN(cfg, in_channels=fpn_in_channels)
            rpn_in_channels = self.fpn.out_channels
            roi_in_channels = 256
        else:
            # Without FPN the RoI head consumes the raw backbone output:
            # VGG16 ends at 512 channels, otherwise 2048 is assumed.
            rpn_in_channels = self.backbone.out_channels
            roi_in_channels = 512 if cfg.MODEL.BACKBONE.NAME == "vgg16" else 2048
        self.rpn = RPN(cfg, in_channels=rpn_in_channels)
        self.box_head = BoxHead(cfg, in_channels=roi_in_channels)

        # Domain adaptation: enabled only when target datasets exist.
        self.enable_adaptation = len(cfg.DATASETS.TARGETS) > 0
        self.ada_layers = [False] * 3
        if self.enable_adaptation:
            self.ada_layers = cfg.ADV.LAYERS
            dis_model = cfg.ADV.DIS_MODEL

            # One discriminator config is required per enabled adaptation
            # layer.  Raise (not assert) so the check survives `python -O`.
            num_enabled = sum(1 for flag in self.ada_layers if flag)
            if num_enabled != len(dis_model):
                raise ValueError(
                    "ADV.LAYERS enables %d layer(s) but ADV.DIS_MODEL has "
                    "%d config(s)" % (num_enabled, len(dis_model))
                )

            self.dis_list = nn.ModuleList()
            for model_config in dis_model:
                self.dis_list.append(Dis(cfg, **model_config))

    def forward(self, images, img_metas, targets=None, t_images=None, t_img_metas=None):
        """Run detection; in training mode also compute adversarial losses.

        Args:
            images: source-domain image batch fed to the backbone.
            img_metas: per-image metadata for the source batch.
            targets: ground-truth annotations; required when training.
            t_images: optional target-domain batch for domain adaptation.
            t_img_metas: metadata for the target-domain batch.

        Returns:
            ``(loss_dict, outputs)`` in training mode, where ``loss_dict``
            contains RPN/box losses plus an ``'adv_loss'`` sub-dict when
            adaptation ran; otherwise the detections from the box head.

        Raises:
            ValueError: if training and *targets* is ``None``.
        """
        if self.training and targets is None:
            raise ValueError("In training mode, targets should be passed")
        outputs = dict()
        loss_dict = dict()
        adv_loss = dict()

        # Backbone returns both the detection features and the (possibly
        # different) features used for adaptation.
        features, s_adaptation_feats = self.backbone(images)
        if self.build_FPN:
            features = self.fpn(features)
        proposals, rpn_losses, s_rpn_logits = self.rpn(images, features, img_metas, targets)
        dets, box_losses, s_proposals, box_features, roi_features = self.box_head(features, proposals, img_metas, targets)

        # Adversarial domain branch: only in training and only when a
        # target-domain batch was provided.
        if self.enable_adaptation and self.training and t_images is not None:
            t_features, t_adaptation_feats = self.backbone(t_images)

            # Target-domain passes run without annotations (is_da=True).
            t_proposals, _, t_rpn_logits = self.rpn(t_images, t_features, t_img_metas, targets=None, is_da=True)
            _, _, t_proposals, t_box_features, t_roi_features = self.box_head(t_features, t_proposals, t_img_metas, targets=None, is_da=True)

            device = features[0].device
            for i, (s_feat, t_feat, netD) in enumerate(zip(s_adaptation_feats, t_adaptation_feats, self.dis_list)):
                # grad_reverse flips gradients so the feature extractor is
                # trained adversarially against the discriminator.  The
                # per-window logit lists are currently unused.
                s_domain_logits, _ = netD(grad_reverse(s_feat, 1.0), grad_reverse(cat(s_rpn_logits), 1.0))
                t_domain_logits, _ = netD(grad_reverse(t_feat, 1.0), grad_reverse(cat(t_rpn_logits), 1.0))
                loss_func = netD.loss_func
                loss_weight = netD.loss_weight
                num_windows = netD.num_windows

                # Domain labels: 0 = source, 1 = target.
                s_domain_loss = loss_func(s_domain_logits, torch.zeros(s_domain_logits.size(0), dtype=torch.long, device=device))
                t_domain_loss = loss_func(t_domain_logits, torch.ones(t_domain_logits.size(0), dtype=torch.long, device=device))

                adv_loss.update({
                    's_domain_loss%d' % i: s_domain_loss * loss_weight,
                    't_domain_loss%d' % i: t_domain_loss * loss_weight,
                })

        if self.training:
            loss_dict.update(rpn_losses)
            loss_dict.update(box_losses)
            if len(adv_loss) > 0:
                loss_dict['adv_loss'] = adv_loss
            return loss_dict, outputs
        return dets
