# ------------------------------------------------------------------------
# Copyright (c) Hitachi, Ltd. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
import argparse

import torch
from torch import nn


def get_args(argv=None):
    """Parse command-line options for the checkpoint-conversion script.

    Args:
        argv: Optional list of argument strings. Defaults to ``None``, in
            which case argparse reads ``sys.argv[1:]`` as before; passing an
            explicit list makes the parser usable from tests and other code.

    Returns:
        argparse.Namespace with ``load_path``, ``save_path``, ``dataset``
        and ``num_queries`` attributes.
    """
    parser = argparse.ArgumentParser(
        description='Convert a DETR detection checkpoint for HOI training.',
    )

    parser.add_argument(
        '--load_path', type=str, required=True,
        help='Path to the source detection checkpoint (.pth).',
    )
    parser.add_argument(
        '--save_path', type=str, required=True,
        help='Where to write the converted checkpoint.',
    )
    parser.add_argument(
        '--dataset', type=str, default='hico',
        help="Target dataset; 'vcoco' adds an extra no-object class row.",
    )
    parser.add_argument(
        '--num_queries', type=int, default=300,
        help='Number of object queries (kept for compatibility).',
    )

    return parser.parse_args(argv)


def _copy_box_heads(sd, prefix, num_heads):
    """Clone each '<prefix>bbox_embed.<i>' 3-layer MLP into a new
    '<prefix>sub_bbox_embed.<i>' head so the subject-box head starts from the
    same weights as the object-box head."""
    for head in range(num_heads):
        for layer in range(3):  # each bbox head is a 3-layer MLP
            for part in ('weight', 'bias'):
                src = '{}bbox_embed.{}.layers.{}.{}'.format(prefix, head, layer, part)
                dst = '{}sub_bbox_embed.{}.layers.{}.{}'.format(prefix, head, layer, part)
                sd[dst] = sd[src].clone()


def _subset_class_heads(sd, prefix, num_heads, obj_ids):
    """Keep only the rows of each '<prefix>class_embed.<i>' weight/bias whose
    index appears in obj_ids (91-way COCO head -> 80 used categories)."""
    for head in range(num_heads):
        for part in ('weight', 'bias'):
            key = '{}class_embed.{}.{}'.format(prefix, head, part)
            sd[key] = sd[key].clone()[obj_ids]


def _append_no_object_rows(sd, prefixes, num_heads):
    """Append one freshly initialised logit row to each class head listed in
    prefixes.  A single nn.Linear is created per head index and shared across
    all prefixes, matching the original behaviour where the top-level and
    decoder heads received identical random rows."""
    for head in range(num_heads):
        ref_key = '{}class_embed.{}.weight'.format(prefixes[0], head)
        extra = nn.Linear(sd[ref_key].shape[1], 1)
        extra.to(sd[ref_key].device)
        for prefix in prefixes:
            w_key = '{}class_embed.{}.weight'.format(prefix, head)
            b_key = '{}class_embed.{}.bias'.format(prefix, head)
            sd[w_key] = torch.cat((sd[w_key], extra.weight.clone()))
            sd[b_key] = torch.cat((sd[b_key], extra.bias.clone()))


def main(args):
    """Convert a DETR-style detection checkpoint for HOI fine-tuning.

    Reads the checkpoint at ``args.load_path``, duplicates every object
    bbox-regression head into a subject ('sub_') head, restricts the 91-way
    classification heads to the 80 COCO categories actually used, optionally
    appends a "no object" class row for V-COCO, and writes the result to
    ``args.save_path``.  ``args.num_queries`` is currently unused by the
    active conversion logic and is kept only for CLI compatibility.
    """
    # map_location keeps the conversion runnable on CPU-only machines even if
    # the checkpoint was saved from GPU tensors.  NOTE(review): torch.load is
    # pickle-based -- only run this on trusted checkpoints.
    ps = torch.load(args.load_path, map_location='cpu')
    sd = ps['model']

    # The 80 COCO category ids that are actually annotated (the original
    # detection head has 91 slots; the gaps are unused ids).
    obj_ids = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13,
               14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
               24, 25, 27, 28, 31, 32, 33, 34, 35, 36,
               37, 38, 39, 40, 41, 42, 43, 44, 46, 47,
               48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
               58, 59, 60, 61, 62, 63, 64, 65, 67, 70,
               72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
               82, 84, 85, 86, 87, 88, 89, 90]

    # Log the checkpoint contents before conversion.
    for k in list(sd.keys()):
        print(k)

    # 12 per-layer prediction heads hang off the model root and the decoder;
    # 5 hang off the encoder (counts match this project's model definition).
    for prefix in ('', 'transformer.decoder.'):
        _copy_box_heads(sd, prefix, 12)
        _subset_class_heads(sd, prefix, 12, obj_ids)
    _copy_box_heads(sd, 'transformer.encoder.', 5)
    _subset_class_heads(sd, 'transformer.encoder.', 5, obj_ids)

    if args.dataset == 'vcoco':
        # V-COCO needs an extra "no object" logit appended to every class head.
        _append_no_object_rows(sd, ('', 'transformer.decoder.'), 12)
        _append_no_object_rows(sd, ('transformer.encoder.',), 5)

    # Log the converted contents, then save.
    print('turning.............')
    for k in list(sd.keys()):
        print(k)
    print('save: '+args.save_path)
    torch.save(ps, args.save_path)


if __name__ == '__main__':
    # Parse CLI options and run the conversion.
    main(get_args())
