import torch
import torch.nn as nn
import torch.nn.functional as F
from detection.layers import cat
from detection.modeling.neck.fpn import FPN
from detection.modeling.neck.rpn import RPN

from ..layers.grad_reverse import grad_reverse
from .backbone import build_backbone
from .da_head import Dis, Image_DA, Instance_DA
from .roi_heads import BoxHead


class DA_Net(nn.Module):
    """Domain-adaptive detector (DA-Faster style).

    Wraps a detection pipeline (backbone -> optional FPN -> RPN -> box head)
    with two domain-adversarial classifiers:

    * ``RCNN_imageDA``   — image-level, per-pixel domain classifier applied to
      the backbone's adaptation feature map(s);
    * ``RCNN_instanceDA`` — instance-level domain classifier applied to the
      pooled per-ROI box features;

    plus a consistency loss tying the two predictions together.
    Source-domain pixels/instances are labeled 1, target-domain ones 0.
    """

    def __init__(self, cfg):
        super(DA_Net, self).__init__()
        self.cfg = cfg
        self.out_indices = cfg.MODEL.BACKBONE.out_indices
        has_layer4 = cfg.MODEL.BACKBONE.HAS_LAYER4
        self.build_FPN = cfg.MODEL.FPN.FPN_ON

        self.backbone = build_backbone(cfg)

        if self.build_FPN:
            # Channel counts of the backbone stages that feed the FPN.
            if not has_layer4:
                fpn_in_channels = [self.backbone.out_channels]
            else:
                # ResNet-style stage widths; keep only the stages selected by
                # cfg.MODEL.BACKBONE.out_indices (a per-stage boolean mask).
                stage_channels = [256, 512, 1024, 2048]
                fpn_in_channels = [
                    stage_channels[idx]
                    for idx, is_out in enumerate(self.out_indices)
                    if is_out
                ]
            self.fpn = FPN(cfg, in_channels=fpn_in_channels)
            rpn_in_channels = self.fpn.out_channels
            roi_in_channels = 256
        else:
            rpn_in_channels = self.backbone.out_channels
            # Without FPN the ROI feature width depends on the backbone family.
            roi_in_channels = 512 if cfg.MODEL.BACKBONE.NAME == "vgg16" else 2048
        self.rpn = RPN(cfg, in_channels=rpn_in_channels)
        self.box_head = BoxHead(cfg, in_channels=roi_in_channels)

        # Domain-adversarial heads.
        self.RCNN_imageDA = Image_DA(in_channels=512)
        self.RCNN_instanceDA = Instance_DA(in_channels=4096)

    def forward(self, images, img_metas, targets=None, t_images=None, t_img_metas=None):
        """Run detection; in training also compute domain-adversarial losses.

        Args:
            images: source-domain image batch.
            img_metas: per-image metadata for the source batch.
            targets: ground-truth boxes/labels for the source batch
                (required in training mode).
            t_images: unlabeled target-domain image batch (training only).
            t_img_metas: per-image metadata for the target batch.

        Returns:
            ``(loss_dict, outputs)`` in training mode, detections otherwise.

        Raises:
            ValueError: if called in training mode without ``targets``.
        """
        if self.training and targets is None:
            raise ValueError("In training mode, targets should be passed")
        outputs = dict()
        loss_dict = dict()
        adv_loss = dict()

        features, s_adaptation_feats = self.backbone(images)
        if self.build_FPN:
            features = self.fpn(features)
        proposals, rpn_losses, s_rpn_logits = self.rpn(images, features, img_metas, targets)
        dets, box_losses, s_proposals, box_features, roi_features = self.box_head(
            features, proposals, img_metas, targets)

        device = features[0].device
        # Domain-adversarial branch (training only; ``targets`` is guaranteed
        # non-None here by the check at the top of forward).
        if self.training and targets is not None:
            t_features, t_adaptation_feats = self.backbone(t_images)
            t_proposals, _, t_rpn_logits = self.rpn(
                t_images, t_features, t_img_metas, targets=None, is_da=True)
            _, _, t_proposals, t_box_features, t_roi_features = self.box_head(
                t_features, t_proposals, t_img_metas, targets=None, is_da=True)

            # Instance-level losses depend only on the pooled box features, not
            # on the per-level adaptation maps, so compute them once up front.
            # (The original recomputed them on every loop iteration and only
            # the last result was retained, since the dict keys were overwritten.)
            instance_sigmoid = self.RCNN_instanceDA(box_features)
            instance_label = torch.ones(
                instance_sigmoid.shape, dtype=instance_sigmoid.dtype, device=device)
            DA_ins_loss_cls = F.binary_cross_entropy(instance_sigmoid, instance_label)

            tgt_instance_sigmoid = self.RCNN_instanceDA(t_box_features)
            tgt_instance_label = torch.zeros(
                tgt_instance_sigmoid.shape, dtype=tgt_instance_sigmoid.dtype, device=device)
            tgt_DA_ins_loss_cls = F.binary_cross_entropy(tgt_instance_sigmoid, tgt_instance_label)

            # NOTE(review): when several adaptation levels are returned, each
            # iteration overwrites the same keys, so only the LAST level's
            # image/consistency losses reach the optimizer. This matches the
            # original behavior — confirm whether a sum over levels was intended.
            for s_feat, t_feat in zip(s_adaptation_feats, t_adaptation_feats):
                # Source pixels are labeled as domain class 1.
                base_score = self.RCNN_imageDA(s_feat)
                base_label = torch.ones(
                    (base_score.size(0), base_score.size(2), base_score.size(3)),
                    dtype=torch.long, device=device)
                DA_img_loss_cls = F.nll_loss(F.log_softmax(base_score, dim=1), base_label)

                # Consistency regularizer: mean image-level probability should
                # match the instance-level predictions.
                # NOTE(review): source reads softmax channel 0 and target reads
                # channel 1, although the image labels mark source as class 1 —
                # kept as-is (mirrors the original); verify the channel choice.
                consistency_prob = torch.mean(F.softmax(base_score, dim=1)[:, 0, :, :])
                consistency_prob = consistency_prob.repeat(instance_sigmoid.size())
                DA_cst_loss = F.mse_loss(instance_sigmoid, consistency_prob, reduction='sum')

                # Target pixels are labeled as domain class 0.
                tgt_base_score = self.RCNN_imageDA(t_feat)
                tgt_base_label = torch.zeros(
                    (tgt_base_score.size(0), tgt_base_score.size(2), tgt_base_score.size(3)),
                    dtype=torch.long, device=device)
                tgt_DA_img_loss_cls = F.nll_loss(
                    F.log_softmax(tgt_base_score, dim=1), tgt_base_label)

                tgt_consistency_prob = torch.mean(F.softmax(tgt_base_score, dim=1)[:, 1, :, :])
                tgt_consistency_prob = tgt_consistency_prob.repeat(tgt_instance_sigmoid.size())
                tgt_DA_cst_loss = F.mse_loss(
                    tgt_instance_sigmoid, tgt_consistency_prob, reduction='sum')

                adv_loss.update({
                    's_img_cls': DA_img_loss_cls,
                    's_ins_cls': DA_ins_loss_cls,
                    's_cst_cls': DA_cst_loss,
                    't_img_cls': tgt_DA_img_loss_cls,
                    't_ins_cls': tgt_DA_ins_loss_cls,
                    't_cst_cls': tgt_DA_cst_loss,
                })

        if self.training:
            loss_dict.update(rpn_losses)
            loss_dict.update(box_losses)
            if adv_loss:
                loss_dict['adv_loss'] = adv_loss
            return loss_dict, outputs
        return dets
