from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import logging

import torch
import torch.nn as nn
import torch.nn.functional as F

import copy
import math
import random
import models
from models.attention import get_cross_attention
from models.interNet import build_pose_net
from models.hoiNet import build_hoi_net
from torchvision.ops.boxes import box_iou
from utils.box_ops import recover_boxes

BN_MOMENTUM = 0.1
logger = logging.getLogger(__name__)


class DeConv(nn.Module):
    """Stack of transposed-convolution stages that upsample a feature map.

    Each stage doubles the spatial resolution, so the number of stages is
    log2 of the ratio between the target heatmap size and the transformer
    feature size (``cfg.MODEL.HEATMAP_SIZE[0] // cfg.MODEL.TRANS_SIZE[0]``).
    """

    def __init__(self, cfg):
        super().__init__()
        extra = cfg.MODEL.EXTRA
        self.deconv_with_bias = extra.DECONV_WITH_BIAS

        self.heatmap_size = cfg.MODEL.HEATMAP_SIZE
        self.trans_size = cfg.MODEL.TRANS_SIZE

        # Upscale factor between the low-res feature map and the heatmap;
        # assumed to be a power of two — TODO confirm in config validation.
        mod = self.heatmap_size[0] // self.trans_size[0]
        self.layer_num = int(math.log(mod, 2))

        self.deconv_layers = nn.ModuleList(self._make_deconv_layer(
            extra.NUM_DECONV_LAYERS,   # e.g. 1
            extra.NUM_DECONV_FILTERS,  # e.g. [d_model]
            extra.NUM_DECONV_KERNELS,  # e.g. [4]
        ) for _ in range(self.layer_num))

    def _get_deconv_cfg(self, deconv_kernel, index):
        """Return ``(kernel, padding, output_padding)`` for a supported kernel.

        Only kernel sizes 4, 3 and 2 yield exact 2x upsampling with stride 2.

        Raises:
            ValueError: if ``deconv_kernel`` is not one of 2, 3 or 4.
        """
        if deconv_kernel == 4:
            padding = 1
            output_padding = 0
        elif deconv_kernel == 3:
            padding = 1
            output_padding = 1
        elif deconv_kernel == 2:
            padding = 0
            output_padding = 0
        else:
            # Previously an unsupported kernel fell through and raised an
            # opaque UnboundLocalError on return; fail fast and explicitly.
            raise ValueError(
                f'unsupported deconv kernel size: {deconv_kernel} (expected 2, 3 or 4)')

        return deconv_kernel, padding, output_padding

    def _make_deconv_layer(self, num_layers, num_filters, num_kernels):
        """Build one upsampling stage: ConvTranspose2d -> BatchNorm2d -> ReLU per layer."""
        assert num_layers == len(num_filters), \
            'ERROR: num_deconv_layers is different len(num_deconv_filters)'
        assert num_layers == len(num_kernels), \
            'ERROR: num_deconv_layers is different len(num_deconv_filters)'

        layers = []
        for i in range(num_layers):
            kernel, padding, output_padding = \
                self._get_deconv_cfg(num_kernels[i], i)

            # Channel count is kept constant: in_channels == out_channels.
            planes = num_filters[i]
            layers.append(
                nn.ConvTranspose2d(
                    in_channels=planes,
                    out_channels=planes,
                    kernel_size=kernel,
                    stride=2,
                    padding=padding,
                    output_padding=output_padding,
                    bias=self.deconv_with_bias))
            layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))
            layers.append(nn.ReLU(inplace=True))
            self.inplanes = planes

        return nn.Sequential(*layers)

    def forward(self, x):
        """Apply all upsampling stages in sequence; each doubles H and W."""
        for layer in self.deconv_layers:
            x = layer(x)
        return x


class ResBlock2d(nn.Module):
    """Residual block: conv -> ReLU -> conv plus a skip connection.

    When the channel count changes, the input is projected through a 1x1
    convolution so the skip connection matches the output channels.
    """

    def __init__(self, in_features, out_features, kernel_size=1, padding=0):
        super(ResBlock2d, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.conv1 = nn.Conv2d(in_channels=in_features, out_channels=in_features,
                               kernel_size=kernel_size, padding=padding)
        self.conv2 = nn.Conv2d(in_channels=in_features, out_channels=out_features,
                               kernel_size=kernel_size, padding=padding)
        if out_features != in_features:
            # 1x1 projection so the residual add is shape-compatible.
            self.channel_conv = nn.Conv2d(in_features, out_features, 1)
        self.relu = nn.ReLU()

    def forward(self, x):
        # Skip path: identity when channels match, 1x1 projection otherwise.
        if self.in_features == self.out_features:
            residual = x
        else:
            residual = self.channel_conv(x)
        hidden = self.relu(self.conv1(x))
        return self.conv2(hidden) + residual

class SameBlock2d(nn.Module):
    """Single conv -> ReLU block; spatial size is preserved when
    kernel_size/padding are chosen accordingly (defaults: 1x1 conv)."""

    def __init__(self, in_features, out_features, kernel_size=1, padding=0):
        super(SameBlock2d, self).__init__()
        self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features,
                              kernel_size=kernel_size, padding=padding)
        self.relu = nn.ReLU()

    def forward(self, x):
        return self.relu(self.conv(x))


class MLP_FC(nn.Module):
    """1x1-conv MLP over feature maps: project in -> hidden, apply one
    residual block, then project hidden -> out."""

    def __init__(self, in_features, hidden_features, out_features) -> None:
        super(MLP_FC, self).__init__()
        self.same1 = SameBlock2d(in_features=in_features, out_features=hidden_features)
        self.res = ResBlock2d(in_features=hidden_features, out_features=hidden_features)
        self.same2 = SameBlock2d(in_features=hidden_features, out_features=out_features)

    def forward(self, x):
        out = x
        for stage in (self.same1, self.res, self.same2):
            out = stage(out)
        return out


class MLP_Fusion(nn.Module):
    """Fuse a channel-concatenated (pose + HOI) feature map back down to
    the pose channel count via a 1x1-conv MLP."""

    def __init__(self, cfg):
        super(MLP_Fusion, self).__init__()
        # NOTE(review): dimensions are hard-coded; the cfg.XXX markers suggest
        # they were meant to come from the config — confirm and wire up.
        self.pose_dim = 96 # cfg.XXX
        self.hoi_dim = 256 # cfg.XXX
        self.fusion_hidden_dim = 512 # cfg.xxx
        self.convs = MLP_FC(self.pose_dim + self.hoi_dim,
                            self.fusion_hidden_dim,
                            self.pose_dim)

    def forward(self, cat_feat):
        return self.convs(cat_feat)

class MLP_FC_2(nn.Module):
    """3x3-conv variant of MLP_FC (padding 1 keeps spatial size):
    in -> hidden, one residual block, hidden -> out."""

    def __init__(self, in_features, hidden_features, out_features) -> None:
        super(MLP_FC_2, self).__init__()
        self.same1 = SameBlock2d(in_features=in_features, out_features=hidden_features,
                                 kernel_size=3, padding=1)
        self.res = ResBlock2d(in_features=hidden_features, out_features=hidden_features,
                              kernel_size=3, padding=1)
        self.same2 = SameBlock2d(in_features=hidden_features, out_features=out_features,
                                 kernel_size=3, padding=1)

    def forward(self, x):
        out = x
        for stage in (self.same1, self.res, self.same2):
            out = stage(out)
        return out

class MLP_Fusion_2(nn.Module):
    """Refine a pose feature map with a 3x3-conv MLP, keeping the pose
    channel count unchanged."""

    def __init__(self, cfg):
        super(MLP_Fusion_2, self).__init__()
        # NOTE(review): dimensions are hard-coded; the cfg.XXX markers suggest
        # they were meant to come from the config — confirm and wire up.
        self.pose_dim = 96 # cfg.XXX
        self.fusion_hidden_dim = 512 # cfg.xxx
        self.convs = MLP_FC_2(self.pose_dim, self.fusion_hidden_dim, self.pose_dim)

    def forward(self, cat_feat):
        return self.convs(cat_feat)

class PEHO(nn.Module):
    """Pose estimation enriched with Human-Object Interaction (HOI) cues.

    Pipeline: a pose backbone (``poseNet``) produces per-person feature maps;
    an HOI network (``hoiNet``) produces per-interaction tokens. HOI tokens are
    matched to people by bounding-box IoU and fused into the pose features
    (``fusion_turbo``) before the final heatmap head ``final_layer_hoi``.
    """

    def __init__(self, cfg, object_to_target, is_train, **kwargs):
        super(PEHO, self).__init__()

        self.poseNet = build_pose_net(cfg)
        self.hoiNet = build_hoi_net(cfg, object_to_target)

        extra = cfg.MODEL.EXTRA
        d_model = cfg.MODEL.DIM_MODEL
        cross_encoder_layers_num = 2

        self.cross_attention = get_cross_attention(cfg, cross_encoder_layers_num)
        # Projects 256-d HOI tokens down to the 96-d pose feature space
        # (used by the cross-attention fusion variant).
        self.vec2down = nn.Sequential(
            nn.Linear(256, 96),
            nn.ReLU()
        )

        # Final heatmap head: d_model channels -> one channel per joint.
        self.final_layer_hoi = nn.Conv2d(
            in_channels=d_model,
            out_channels=cfg['MODEL']['NUM_JOINTS'],
            kernel_size=extra['FINAL_CONV_KERNEL'],
            stride=1,
            padding=1 if extra['FINAL_CONV_KERNEL'] == 3 else 0
        )

        self.upsample_layer_hoi = DeConv(cfg)
        self.mlp_fusion = MLP_Fusion(cfg)

        # Expands one 256-d HOI token into a 64x48 spatial mask; this ties
        # masked_fusion_layer to a 64x48 feature map — TODO derive from cfg.
        self.vec_fc = nn.Sequential(
            nn.Linear(256, 64*48),
            nn.ReLU()
        )
        self.mlp_fusion2 = MLP_Fusion_2(cfg)

    def _load_final_layer(self):
        """Warm-start ``final_layer_hoi`` / ``upsample_layer_hoi`` from an
        interNet checkpoint; all other parameters are left untouched
        (``strict=False``)."""
        checkpoint_file = 'checkpoints/pose_hrnet_inter2.pth'
        ckpt_state_dict = torch.load(checkpoint_file, map_location=torch.device('cpu'))
        existing_state_dict = {}
        for name, m in ckpt_state_dict.items():
            # Only take the interNet heads, skipping intraNet duplicates;
            # split('.')[-1] keeps the parameter leaf name (weight/bias).
            if 'final_layer' in name and 'intraNet' not in name:
                now_name = 'final_layer_hoi.' + name.split('.')[-1]
                existing_state_dict[now_name] = m
                logger.info(f"load interNet's final_layer weight of {name} to {now_name} of PEHO")
            elif 'upsample_layer' in name and 'intraNet' not in name:
                now_name = 'upsample_layer_hoi.' + name.split('.')[-1]
                existing_state_dict[now_name] = m
                logger.info(f"load interNet's upsample_layer weight of {name} to {now_name} of PEHO")

        self.load_state_dict(existing_state_dict, strict=False)

    def match_by_box(self, hoi_target, hoi_tokens):
        """Align HOI tokens with pose persons by human-box IoU.

        Drops HOI tokens whose human box matches no pose box (IoU < 0.99) and
        reorders the survivors so they follow the pose-person order.

        Returns:
            tuple: (reordered ``hoi_tokens``, list with the number of HOIs per
            pose person).
        """
        boxes_pose = hoi_target['boxes_pose']
        boxes_h = hoi_target['boxes_h']
        assert boxes_h.shape[0] == hoi_tokens.shape[0]

        ious = box_iou(boxes_h, boxes_pose)
        # IoU ~= 1 means the HOI's human box is the same person's box.
        gt = torch.ge(ious, 0.99)

        # Number of HOIs attached to each pose person.
        hoi_num = [torch.nonzero(gt[:, p_idx]).shape[0] for p_idx in range(gt.shape[1])]

        _, pose_hoi_idx = torch.nonzero(gt).unbind(1)
        assert pose_hoi_idx.size(0) != 0

        # Stable argsort of the matched person indices so the token order
        # follows the pose-person order.
        vals = pose_hoi_idx.cpu().numpy().tolist()
        order = sorted(range(len(vals)), key=lambda i: vals[i])
        return hoi_tokens[order], hoi_num

    def match_by_hoi(self, pose_hoi_mark, hoi_tokens):
        """Prune by token validity: keep only HOI tokens that are not
        all-zero, together with their corresponding pose indices."""
        valid_hoi_idx = torch.nonzero(torch.amax(hoi_tokens, dim=-1)).flatten()
        hoi_tokens = hoi_tokens[valid_hoi_idx]
        pose_hoi_idx = pose_hoi_mark[valid_hoi_idx]
        return pose_hoi_idx, hoi_tokens

    def fusion_layer(self, spatial, vector):
        """Concatenate a broadcast HOI vector onto a (C,H,W) pose map and
        reduce back to pose channels. Returns a (1,C,H,W) tensor."""
        vector = vector.unsqueeze(1).unsqueeze(2)  # (256,) -> (256,1,1)
        _, H, W = spatial.shape
        attribute = vector.repeat(1, H, W)  # (D,1,1) -> (D,H,W)
        spatial = torch.cat([spatial, attribute], dim=0).unsqueeze(0)  # (1,C+D,H,W)
        out = self.mlp_fusion(spatial)
        return out

    def fusion(self, hoi_tokens, hoi_num, x_multi_high):
        """Per person, fuse one randomly chosen matching HOI token into the
        pose feature map via ``fusion_layer`` (residual add); persons without
        HOIs pass through unchanged."""
        assert len(hoi_num) == x_multi_high.shape[0]
        counter = 0
        out = []
        for i, hoi_n in enumerate(hoi_num):
            if hoi_n != 0:
                hoi_token = hoi_tokens[counter:counter+hoi_n]
                # Randomly sample one of this person's HOIs (stochastic at
                # every forward pass, including eval — TODO confirm intended).
                rand_idx = random.randint(0, hoi_n-1)
                fused_pose = self.fusion_layer(x_multi_high[i], hoi_token[rand_idx]) \
                    + x_multi_high[i].unsqueeze(0)
            else:
                fused_pose = x_multi_high[i].unsqueeze(0)
            counter += hoi_n
            out.append(fused_pose)
        return torch.cat(out, dim=0)

    def masked_fusion_layer(self, spatial, vector):
        """Modulate a (C,H,W) pose map with a spatial mask predicted from a
        256-d HOI token, then refine with a conv MLP.

        NOTE(review): ``vec_fc`` outputs 64*48 values, so this assumes
        H*W == 64*48 — confirm against the pose backbone's output size.
        """
        vector = vector.unsqueeze(0)  # (256,) -> (1,256)
        _, H, W = spatial.shape
        mask = self.vec_fc(vector)  # (1,256) -> (1,H*W)
        mask = mask.reshape(1, 1, H, W)
        spatial = torch.mul(mask, spatial.unsqueeze(0))  # (1,C,H,W)
        out = self.mlp_fusion2(spatial)
        return out

    def fusion_turbo(self, hoi_tokens, hoi_num, x_multi_high):
        """Like ``fusion`` but uses the mask-based ``masked_fusion_layer``;
        this is the active fusion strategy in ``forward``."""
        assert len(hoi_num) == x_multi_high.shape[0]
        counter = 0
        out = []
        for i, hoi_n in enumerate(hoi_num):
            if hoi_n != 0:
                hoi_token = hoi_tokens[counter:counter+hoi_n]
                # Randomly sample one of this person's HOIs.
                rand_idx = random.randint(0, hoi_n-1)
                fused_pose = self.masked_fusion_layer(x_multi_high[i], hoi_token[rand_idx]) \
                    + x_multi_high[i].unsqueeze(0)
            else:
                fused_pose = x_multi_high[i].unsqueeze(0)
            counter += hoi_n
            out.append(fused_pose)
        return torch.cat(out, dim=0)

    def forward(self, sample):
        """Run pose + HOI networks and fuse them.

        Args:
            sample: dict with 'x' (images), 'hoi' (per-image HOI targets, may
                contain falsy entries), 'length' (per-image person marks).

        Returns:
            tuple: (poseNet outputs dict with added 'pose_hoi' heatmaps,
            interaction loss from the HOI network, 0 if no HOI in batch).
        """
        hoi_list, mark_list = sample['hoi'], sample['length']

        # >>>>>>>>>>>>>>>>>>>>>>>>> Pose Net <<<<<<<<<<<<<<<<<<<<<<<<<
        outputs = self.poseNet({'x': sample['x'], 'length': sample['length']})

        # >>>>>>>>>>>>>>>>>>>>>>>>> HOI Net <<<<<<<<<<<<<<<<<<<<<<<<<
        interaction_loss = 0
        # Fix: previously hoi_tokens_list was only defined inside the `if`,
        # so a batch with no valid HOI raised NameError at the assert below.
        hoi_tokens_list = [None] * len(hoi_list)
        if any(hoi_list):   # at least one valid HOI in the batch
            hoi_output, hoi_tokens_list, interaction_loss = self.hoiNet(hoi_list)
        assert len(hoi_tokens_list) == len(hoi_list) == len(mark_list)

        # >>>>>>>>>>>>>>>>>>>>>>>>> Pose + HOI <<<<<<<<<<<<<<<<<<<<<<<<<
        # Match pose tokens with HOI tokens and inject HOI information into
        # the pose features. One person may correspond to N HOIs; matching is
        # done by box IoU (see match_by_box). Earlier variants (cross-attention
        # via self.cross_attention, plain concat fusion via self.fusion) were
        # replaced by the mask-based fusion_turbo below.
        outputs['pose_hoi'] = []
        left = 0
        pose_low, pose_high = outputs['x_multi_low'], outputs['x_multi_high']
        for idx, hoi_tokens in enumerate(hoi_tokens_list):  # per image: (hoi_n, 256)
            # x_multi_low is (person_n, 96, 16, 12); slice this image's people.
            mark = mark_list[idx]
            right = left + int(mark.size(0))
            x_multi_low, x_multi_high = pose_low[left:right], pose_high[left:right]
            pose_hoi_mark = torch.nonzero(mark == 1).flatten()

            if pose_hoi_mark.size(0) != 0 and hoi_tokens is not None:  # pose has HOI and HOI tokens exist
                # Drop HOI tokens with no matching person, ordered by person.
                hoi_tokens, hoi_num = self.match_by_box(hoi_list[idx], hoi_tokens)
                x_hoi = self.fusion_turbo(hoi_tokens, hoi_num, x_multi_high)  # (person_n, 96, 64, 48)
                outputs['pose_hoi'].append(self.final_layer_hoi(x_hoi))
            else:
                # No HOI for this image: fall back to the plain pose heatmaps.
                outputs['pose_hoi'].append(outputs['multi'][left:right])

            left = right

        outputs['pose_hoi'] = torch.cat(outputs['pose_hoi'], dim=0)
        return outputs, interaction_loss


def get_pose_net(cfg, object_to_target, **kwargs):
    """Build a PEHO model and warm-start its HOI head layers from the
    interNet checkpoint (see PEHO._load_final_layer)."""
    net = PEHO(cfg, object_to_target, **kwargs)
    net._load_final_layer()
    return net