import torch
import torch.nn as nn
import torch.nn.functional as F
from detection.layers import cat, softmax_focal_loss
from detection.modeling.neck.fpn import FPN
from detection.modeling.neck.rpn import RPN

from ..layers.grad_reverse import grad_reverse
from .backbone import build_backbone
from .da_head import Dis, Image_DA, Instance_DA, NetD_gc, NetD_lc
from .roi_heads import SW_BoxHead


class SW_Net(nn.Module):
    """Strong-weak aligned detection network for domain-adaptive detection.

    Wraps a standard detector (backbone [+ optional FPN] -> RPN -> SW_BoxHead)
    with adversarial domain-alignment components trained via gradient reversal:

    * a local (pixel-level) discriminator ``net_lc`` on the first adaptation
      feature map,
    * a global (image-level) discriminator ``net_gc`` on the second,
    * an instance-level discriminator ``RCNN_instanceDA`` on pooled ROI
      features, and
    * an image-level category classifier ``conv_ICR`` used both for the ICR
      loss on the source domain and for weighting target instance losses.
    """

    def __init__(self, cfg):
        super(SW_Net, self).__init__()
        self.cfg = cfg
        self.out_indices = cfg.MODEL.BACKBONE.out_indices
        has_layer4 = cfg.MODEL.BACKBONE.HAS_LAYER4
        self.build_FPN = cfg.MODEL.FPN.FPN_ON
        # When set, the discriminators also emit context feature vectors that
        # are concatenated onto the ROI features inside the box head.
        self.gc = cfg.ADV.GC
        self.lc = cfg.ADV.LC

        self.backbone = build_backbone(cfg)

        if self.build_FPN:
            # Collect per-stage channel counts the FPN consumes.
            fpn_in_channels = []
            if not has_layer4:
                fpn_in_channels.append(self.backbone.out_channels)
            else:
                # ResNet-style stage widths; keep only stages marked as outputs.
                out_channels = [256, 512, 1024, 2048]
                for idx, is_out in enumerate(self.out_indices):
                    if is_out:
                        fpn_in_channels.append(out_channels[idx])
            self.fpn = FPN(cfg, in_channels=fpn_in_channels)
            rpn_in_channels = self.fpn.out_channels
            roi_in_channels = 256
        else:
            rpn_in_channels = self.backbone.out_channels
            roi_in_channels = 512 if cfg.MODEL.BACKBONE.NAME == "vgg16" else 2048
        self.rpn = RPN(cfg, in_channels=rpn_in_channels)
        self.box_head = SW_BoxHead(cfg, in_channels=roi_in_channels)

        # Domain-adversarial components.
        self.net_gc = NetD_gc(context=self.gc)  # global (image-level) discriminator
        self.net_lc = NetD_lc(context=self.lc)  # local (pixel-level) discriminator
        # 1x1 conv turning the pooled global feature into per-class logits (ICR).
        self.conv_ICR = nn.Conv2d(512, cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES, kernel_size=1, stride=1, padding=0)

        # Instance-discriminator input width: 4096-d pooled ROI features,
        # optionally concatenated with 128-d local and/or global context vectors.
        feat_d = 4096
        if self.lc:
            feat_d += 128
        if self.gc:
            feat_d += 128
        self.RCNN_instanceDA = Instance_DA(in_channels=feat_d)

    def _local_align(self, feat):
        """Local discriminator pass on an adaptation feature map.

        Returns ``(d_pixel, feat_pixel)``: per-pixel domain predictions
        computed through gradient reversal, and the context vector when
        ``cfg.ADV.LC`` is on (``None`` otherwise).
        """
        if self.lc:
            d_pixel, _ = self.net_lc(grad_reverse(feat, 1.0))
            # Second, non-reversed pass only to extract the context feature.
            _, feat_pixel = self.net_lc(feat)
            return d_pixel, feat_pixel
        return self.net_lc(grad_reverse(feat, 1.0)), None

    def _global_align(self, feat):
        """Global discriminator pass; mirrors :meth:`_local_align` for net_gc."""
        if self.gc:
            domain_p, _ = self.net_gc(grad_reverse(feat, 1.0))
            _, feat_gc = self.net_gc(feat)
            return domain_p, feat_gc
        # NOTE(review): the original omitted the lambda argument in this
        # branch, relying on grad_reverse's default — kept as-is; confirm
        # the default equals 1.0.
        return self.net_gc(grad_reverse(feat)), None

    def forward(self, images, img_metas, targets=None, t_images=None, t_img_metas=None):
        """Run detection, plus adversarial domain losses when training.

        Args:
            images, img_metas, targets: labelled source-domain batch.
            t_images, t_img_metas: unlabelled target-domain batch (training only).

        Returns:
            Training: ``(loss_dict, outputs)`` where ``loss_dict`` holds RPN and
            box losses plus an ``'adv_loss'`` sub-dict of alignment losses.
            Eval: the box head's detections.

        Raises:
            ValueError: if called in training mode without ``targets``.
        """
        if self.training and targets is None:
            raise ValueError("In training mode, targets should be passed")
        outputs = dict()
        loss_dict = dict()
        adv_loss = dict()

        features, s_adaptation_feats = self.backbone(images)
        # Source-domain image-level alignment.  feat_pixel / feat_gc are None
        # when ADV.LC / ADV.GC are off; previously they were left undefined in
        # that case, so the box-head call below raised NameError.
        d_pixel, feat_pixel = self._local_align(s_adaptation_feats[0])
        base_feat2 = s_adaptation_feats[1]
        domain_p, feat_gc = self._global_align(base_feat2)

        if self.build_FPN:
            features = self.fpn(features)
        proposals, rpn_losses, s_rpn_logits = self.rpn(images, features, img_metas, targets)
        dets, box_losses, s_proposals, box_features, instance_pooled_feat = self.box_head(
            features, proposals, img_metas, targets, feat_pixel=feat_pixel, feat_gc=feat_gc
        )

        device = features[0].device
        # Domain-alignment losses (training only; targets is guaranteed non-None
        # here by the guard at the top, so no extra check is needed).
        if self.training:
            # --- source domain ---
            # ICR: image-level multi-label classification on the global feature.
            s_cls_feat = F.adaptive_avg_pool2d(base_feat2, output_size=(1, 1))
            s_cls_feat = self.conv_ICR(s_cls_feat).squeeze(-1).squeeze(-1)
            s_labels = targets[0]['labels']
            target_one_hot = torch.zeros(s_cls_feat.shape, dtype=s_cls_feat.dtype, device=device)
            for l in s_labels:
                target_one_hot[:, l] = 1
            s_ICR_loss = F.binary_cross_entropy_with_logits(s_cls_feat, target_one_hot)

            # Instance-level alignment: source instances carry domain label 1.
            instance_sigmoid = self.RCNN_instanceDA(instance_pooled_feat)
            instance_label = torch.ones(
                (instance_sigmoid.size(0), instance_sigmoid.size(1)),
                dtype=instance_sigmoid.dtype, device=device,
            )
            DA_ins_loss_cls = F.binary_cross_entropy(instance_sigmoid, instance_label)

            # Global alignment (focal loss); source domain label = 0.
            s_domain_label = torch.zeros(domain_p.size(0), dtype=torch.long, device=device)
            s_domain_loss = 0.5 * softmax_focal_loss(domain_p, s_domain_label, gamma=5)
            # Local alignment (least-squares); source pushed toward 0.
            s_domain_loss_p = 0.5 * torch.mean(d_pixel ** 2)

            # --- target domain ---
            t_features, t_adaptation_feats = self.backbone(t_images)
            tgt_d_pixel, tgt_feat_pixel = self._local_align(t_adaptation_feats[0])
            tgt_base_feat2 = t_adaptation_feats[1]
            tgt_domain_p, tgt_feat_gc = self._global_align(tgt_base_feat2)

            t_proposals, _, t_rpn_logits = self.rpn(t_images, t_features, t_img_metas, targets=None, is_da=True)
            _, _, t_proposals, t_box_features, tgt_instance_pooled_feat = self.box_head(
                t_features, t_proposals, t_img_metas, targets=None,
                feat_pixel=tgt_feat_pixel, feat_gc=tgt_feat_gc, is_da=True
            )

            tgt_cls_feat = self.conv_ICR(
                F.adaptive_avg_pool2d(tgt_base_feat2, output_size=(1, 1))
            ).squeeze(-1).squeeze(-1)

            tgt_instance_sigmoid = self.RCNN_instanceDA(tgt_instance_pooled_feat)
            # Target instances carry domain label 0.
            tgt_instance_label = torch.zeros(
                tgt_instance_sigmoid.shape, dtype=tgt_instance_sigmoid.dtype, device=device
            )

            # Per-instance weights: disagreement between the image-level
            # classifier and each ROI's predicted class modulates the
            # instance-alignment loss (exp of the absolute probability gap).
            tgt_cls_score = self.box_head.box_predictor.cls_score(t_box_features)
            tgt_cls_prob = torch.softmax(tgt_cls_score, 1)
            tgt_cls_pre_label = tgt_cls_prob.argmax(1)
            # NOTE(review): indexing [0] assumes a single target image per
            # batch — confirm against the data loader.
            tgt_cls_feat_sig = torch.sigmoid(tgt_cls_feat[0])
            target_weight = []
            for i in range(len(tgt_cls_pre_label)):
                label_i = tgt_cls_pre_label[i].item()
                if label_i < self.cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES:
                    diff_value = torch.exp(
                        1.0 * torch.abs(tgt_cls_feat_sig[label_i] - tgt_cls_prob[i][label_i])
                    ).item()
                    target_weight.append(diff_value)
                else:
                    # Background / out-of-range prediction: neutral weight.
                    target_weight.append(1.0)
            t_instance_loss_func = nn.BCELoss(
                weight=torch.Tensor(target_weight).view(-1, 1).to(device=device)
            )
            tgt_DA_ins_loss_cls = t_instance_loss_func(tgt_instance_sigmoid, tgt_instance_label)

            # Global alignment (focal loss); target domain label = 1.
            t_domain_label = torch.ones(tgt_domain_p.size(0), dtype=torch.long, device=device)
            t_domain_loss = 0.5 * softmax_focal_loss(tgt_domain_p, t_domain_label, gamma=5)
            # Local alignment (least-squares); target pushed toward 1.
            t_domain_loss_p = 0.5 * torch.mean((1 - tgt_d_pixel) ** 2)

            adv_loss.update({
                's_ICR_loss': s_ICR_loss,
                's_ins_cls': DA_ins_loss_cls,
                's_domain_loss': s_domain_loss,
                's_domain_loss_p': s_domain_loss_p,
                't_ins_cls': tgt_DA_ins_loss_cls,
                't_domain_loss': t_domain_loss,
                't_domain_loss_p': t_domain_loss_p,
            })

        if self.training:
            loss_dict.update(rpn_losses)
            loss_dict.update(box_losses)
            if adv_loss:
                loss_dict['adv_loss'] = adv_loss
            return loss_dict, outputs
        return dets
