import numpy as np
from sklearn.manifold import TSNE
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import time
import json

import _init_paths
import os
import sys
import numpy as np
import argparse
import pprint
import pdb
import time
import collections
import torch
import torch.nn as nn
import torch.optim as optim
import random

#from tensorboardX import SummaryWriter
import torchvision.transforms as transforms
from torch.utils.data.sampler import Sampler
from torch.autograd import Variable
import torch.utils.data as Data
from roi_data_layer.roidb import combined_roidb, rank_roidb_ratio, filter_class_roidb
from roi_data_layer.roibatchLoader import roibatchLoader
from model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from model.utils.net_utils import weights_normal_init, save_net, load_net, \
    adjust_learning_rate, save_checkpoint, clip_gradient
from model.faster_rcnn.resnet import resnet
import pickle
from datasets.metadata import MetaDataset, COCO_MetaDataset
from collections import OrderedDict

def parse_args():
    """
    Parse command-line arguments for Meta R-CNN training.

    Returns:
        argparse.Namespace with all training / meta-learning options.
    """

    def _str2bool(value):
        # argparse's `type=bool` is broken: bool('False') is True because any
        # non-empty string is truthy, so e.g. `--cuda False` used to enable
        # CUDA anyway.  Interpret the usual textual spellings explicitly.
        if isinstance(value, bool):
            return value
        lowered = value.strip().lower()
        if lowered in ('true', 't', 'yes', 'y', '1'):
            return True
        if lowered in ('false', 'f', 'no', 'n', '0'):
            return False
        raise argparse.ArgumentTypeError('boolean value expected, got %r' % value)

    parser = argparse.ArgumentParser(description='Train Meta R-CNN network')
    # Define training data and Model
    parser.add_argument('--dataset', dest='dataset',
                        help='training dataset:coco2017,coco,pascal_07_12',
                        default='pascal_voc_0712', type=str)
    parser.add_argument('--net', dest='net',
                        help='metarcnn',
                        default='metarcnn', type=str)
    parser.add_argument('--frcn_joint', dest='frcn_joint',
                        help='FRCN+joint',
                        default=False, type=_str2bool)
    # Define display and save dir
    parser.add_argument('--start_epoch', dest='start_epoch',
                        help='starting epoch',
                        default=1, type=int)
    parser.add_argument('--epochs', dest='max_epochs',
                        help='number of epochs to train',
                        default=21, type=int)
    parser.add_argument('--disp_interval', dest='disp_interval',
                        help='number of iterations to display',
                        default=100, type=int)
    parser.add_argument('--checkpoint_interval', dest='checkpoint_interval',
                        help='number of iterations to display',
                        default=10000, type=int)
    parser.add_argument('--save_dir', dest='save_dir',
                        help='directory to save models', default="./models",
                        type=str)
    # Define training parameters
    parser.add_argument('--nw', dest='num_workers',
                        help='number of worker to load data',
                        default=0, type=int)
    parser.add_argument('--cuda', dest='cuda', default=True, type=_str2bool,
                        help='whether use CUDA')
    parser.add_argument('--bs', dest='batch_size',
                        help='batch_size',
                        default=1, type=int)
    parser.add_argument('--cag', dest='class_agnostic', default=False, type=_str2bool,
                        help='whether perform class_agnostic bbox regression')
    parser.add_argument('--tfa', dest='tfa', default=False, type=_str2bool,
                        help='improved two phase fine-tuning approach')
    parser.add_argument('--cosine', dest='cosine', default=False, type=_str2bool,
                        help='use cosine classification subnet')
    parser.add_argument('--local', dest='local', default=False, type=_str2bool,
                        help='add local feature metric')
    parser.add_argument('--sim', dest='sim', default=False, type=_str2bool,
                        help='loss improved by similarity')
    # Define meta parameters
    parser.add_argument('--meta_train', dest='meta_train', default=False, type=_str2bool,
                        help='whether perform meta training')
    parser.add_argument('--meta_loss', dest='meta_loss', default=False, type=_str2bool,
                        help='whether perform adding meta loss')
    parser.add_argument('--phase', dest='phase',
                        help='the phase of training process',
                        default=1, type=int)
    parser.add_argument('--shots', dest='shots',
                        help='the number meta input of PRN network',
                        default=10, type=int)
    parser.add_argument('--meta_type', dest='meta_type', default=1, type=int,
                        help='choose which sets of metaclass')
    parser.add_argument('--save_checkpoint', dest='save_checkpoint', default=True, type=_str2bool)
    # Define nuclear norm parameters
    parser.add_argument('--nuc_loss', dest='nuc_loss', default=False, type=_str2bool,
                        help='whether perform adding nuc loss')
    # config optimization
    parser.add_argument('--o', dest='optimizer',
                        help='training optimizer',
                        default="sgd", type=str)
    parser.add_argument('--lr', dest='lr',
                        help='starting learning rate',
                        default=0.001, type=float)
    parser.add_argument('--lr_decay_step', dest='lr_decay_step',
                        help='step to do learning rate decay, unit is epoch',
                        default=4, type=int)
    parser.add_argument('--lr_decay_gamma', dest='lr_decay_gamma',
                        help='learning rate decay ratio',
                        default=0.1, type=float)
    # set training session
    parser.add_argument('--s', dest='session',
                        help='training session',
                        default=1, type=int)
    # resume trained model
    parser.add_argument('--r', dest='resume',
                        help='resume checkpoint or not',
                        default=False, type=_str2bool)
    parser.add_argument('--checksession', dest='checksession',
                        help='checksession to load model',
                        default=1, type=int)
    parser.add_argument('--checkepoch', dest='checkepoch',
                        help='checkepoch to load model',
                        default=10, type=int)
    parser.add_argument('--checkpoint', dest='checkpoint',
                        help='checkpoint to load model',
                        default=21985, type=int)
    # log and display
    parser.add_argument('--use_tfboard', dest='use_tfboard',
                        help='whether use tensorflow tensorboard',
                        default=False, type=_str2bool)
    parser.add_argument('--log_dir', dest='log_dir',
                        help='directory to save logs', default='logs',
                        type=str)
    args = parser.parse_args()
    return args


class sampler(Sampler):
    """Index sampler that shuffles whole batches rather than single samples.

    Indices inside a batch stay consecutive while the order of the batches
    themselves is randomized on every pass.  Tail indices that do not fill
    a complete batch are always emitted last, in their natural order.
    """

    def __init__(self, train_size, batch_size):
        self.num_data = train_size
        self.batch_size = batch_size
        self.num_per_batch = train_size // batch_size
        # Column offsets 0..batch_size-1, broadcast against each batch start.
        self.range = torch.arange(0, batch_size).view(1, batch_size).long()
        tail = train_size % batch_size
        self.leftover_flag = bool(tail)
        if tail:
            # Indices belonging to the final, incomplete batch.
            self.leftover = torch.arange(train_size - tail, train_size).long()

    def __iter__(self):
        # Random start index of every full batch (multiples of batch_size).
        starts = (torch.randperm(self.num_per_batch) * self.batch_size).view(-1, 1)
        self.rand_num = starts.expand(self.num_per_batch, self.batch_size) + self.range
        self.rand_num_view = self.rand_num.view(-1)
        if self.leftover_flag:
            self.rand_num_view = torch.cat((self.rand_num_view, self.leftover), 0)
        return iter(self.rand_num_view)

    def __len__(self):
        return self.num_data
        
if __name__ == '__main__':
    # Visualization driver: for each checkpoint epoch, extract per-class
    # attention vectors from the Meta R-CNN meta branch at two support-set
    # sizes, dump them to JSON, and plot t-SNE embeddings of both.
    args = parse_args()
    shots_2 = 50  # second, larger support-set size for the comparison plot
    local_image_size = 224  # input crop size fed to the PRN/meta branch
    '''
    print('Called with args:')
    print(args)
    '''
    

    # Pick dataset-specific imdb names plus anchor / GT-box config overrides.
    if args.dataset == "coco2017":
        args.imdb_name = "coco_2017_train"
        args.imdbval_name = "coco_2017_val"
        args.set_cfgs = ['ANCHOR_SCALES', '[2, 4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '50']
    elif args.dataset == "coco":
        args.imdb_name = "coco_2014_train+coco_2014_valminusminival"
        args.imdbval_name = "coco_2014_minival"
        args.set_cfgs = ['ANCHOR_SCALES', '[2, 4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '50']
    elif args.dataset == "pascal_voc_0712":
        if args.phase == 1: # three types of base and novel classes splits
            if args.meta_type == 1:
                args.imdb_name = "voc_2007_train_first_split+voc_2012_train_first_split"
            elif args.meta_type == 2:
                args.imdb_name = "voc_2007_train_second_split+voc_2012_train_second_split"
            elif args.meta_type == 3:
                args.imdb_name = "voc_2007_train_third_split+voc_2012_train_third_split"
            elif args.meta_type == 0:
                args.imdb_name = "voc_2007_trainval"
        else:
            args.imdb_name = "voc_2007_shots" # the default sampled shots  saved path of meta classes in the first phase
        args.imdbval_name = "voc_2007_test"
        args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
     # the number of sets of metaclass
    cfg.TRAIN.META_TYPE = args.meta_type

    cfg.USE_GPU_NMS = args.cuda
    if args.cuda:
        cfg.CUDA = True

    # The config file is hard-coded here, overriding whatever parse_args set.
    args.cfg_file = "cfgs/res101_ms.yml"
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)
    '''
    print('Using config:')
    pprint.pprint(cfg)
    '''
    np.random.seed(cfg.RNG_SEED)
    if torch.cuda.is_available() and not args.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with --cuda")
    if args.dataset == "pascal_voc_0712":
    # Second phase only use fewshot number of base and novel classes
        shots = args.shots
        if args.meta_type == 1:  #  use the first sets of all classes
            metaclass = cfg.TRAIN.ALLCLASSES_FIRST
        if args.meta_type == 2:  #  use the second sets of all classes
            metaclass = cfg.TRAIN.ALLCLASSES_SECOND
        if args.meta_type == 3:  #  use the third sets of all classes
            metaclass = cfg.TRAIN.ALLCLASSES_THIRD
        
        # prepare meta sets for meta training
        if args.meta_train:
            # construct the input dataset of PRN network
            img_size = local_image_size
            img_set = [('2007', 'trainval')]
            metadataset_1 = MetaDataset('data/VOCdevkit2007',
                                        img_set, metaclass, img_size, shots=shots, shuffle=True, phase=args.phase, local=args.local)

            metaloader_1 = torch.utils.data.DataLoader(metadataset_1, batch_size=1, shuffle=False, num_workers=0,
                                                    pin_memory=False)
    # NOTE(review): img_set, metaclass, img_size and shots are bound only inside
    # the pascal_voc_0712 / meta_train branches above, so this line raises
    # NameError when --meta_train is False or another dataset is selected.
    # It most likely belongs inside the `if args.meta_train:` block -- confirm.
    metadataset_2 = MetaDataset('data/VOCdevkit2007',
                                        img_set, metaclass, img_size, shots=shots_2, shuffle=True, phase=args.phase, local=args.local)
    metaloader_2 = torch.utils.data.DataLoader(metadataset_2, batch_size=1, shuffle=False, num_workers=0,
                                                    pin_memory=False)
    # imdb, roidb, ratio_list, ratio_index = combined_roidb(args.imdb_name)
    # initilize the network here
    # 21 entries: '__background__' at index 0 plus the 20 VOC classes
    # (base classes listed first, then the held-out/novel ones).
    imdb_classes = ['__background__', 'aeroplane', 'bicycle', 'boat', 'bottle', 'car', 'cat', 'chair', 'diningtable', 'dog', 'horse', 'person', 'pottedplant', 'sheep', 'train', 'tvmonitor', 'bird', 'bus', 'cow', 'motorbike', 'sofa']
    
    if args.net == 'metarcnn':
        fasterRCNN = resnet(imdb_classes, 101, pretrained=True, class_agnostic=args.class_agnostic,
                            meta_train=args.meta_train, meta_loss=args.meta_loss, nuc_loss=args.nuc_loss, cosine=args.cosine, local=args.local, sim=args.sim)
    # NOTE(review): fasterRCNN is unbound (NameError below) if --net is not
    # 'metarcnn'; there is no other branch constructing the model.
    fasterRCNN.create_architecture()
    
    if args.cuda:
        fasterRCNN.cuda()
        
    output_dir = args.save_dir
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    
    # Visualize the checkpoints saved for epochs 21..30.
    for epoch in range(21, 31):
        
        if args.resume:
            load_name = os.path.join(output_dir,
                                     '{}_metarcnn_{}_{}_{}.pth'.format(args.dataset, args.checksession,
                                                                       epoch, args.checkpoint))
            print("loading checkpoint %s" % (load_name))
            checkpoint = torch.load(load_name)
            # the number of classes in second phase is different from first phase
            # NOTE(review): weights are loaded into the model only when
            # --phase 2; for phase 1 the checkpoint is read but never applied.
            if args.phase == 2:
                new_state_dict = OrderedDict()
                for k, v in checkpoint['model'].items():
                    name = k
                    new_state_dict[name] = v
                fasterRCNN.load_state_dict(new_state_dict)
    
            if 'pooling_mode' in checkpoint.keys():
                cfg.POOLING_MODE = checkpoint['pooling_mode']
            print("loaded checkpoint %s" % (load_name))
        
        torch.backends.cudnn.enabled = False
        
        # One 2048-d attention vector per (class, shot); 20 foreground
        # classes, background excluded.
        class_attentions = torch.zeros(20, shots, 2048).cuda()
        meta_iter = iter(metaloader_1)
        for i in range(shots):
            prndata, prncls = next(meta_iter)
            im_data_list = []
            im_info_list = []
            gt_boxes_list = []
            num_boxes_list = []
            im_data = torch.FloatTensor(1)
            if args.cuda:
                im_data = im_data.cuda()
            # volatile=True is the legacy (pre-0.4 PyTorch) no-grad inference flag.
            im_data = Variable(im_data, volatile=True)
            im_data.data.resize_(prndata.squeeze(0).size()).copy_(prndata.squeeze(0))
            im_data_list.append(im_data)
            # average_shot=True: forward pass returns per-class attention vectors.
            attentions = fasterRCNN(im_data_list, im_info_list, gt_boxes_list, num_boxes_list,
                                            average_shot=True)
            # pdb.set_trace()
            for idx, cls in enumerate(prncls):
                # class_attentions[int(cls)].append(attentions[idx].data)
                class_attentions[int(cls), i, :] = attentions[idx].data

        # Dump attentions to JSON for offline inspection.
        # NOTE(review): assumes 'model_path/pooled_feat/' already exists.
        attention_dict = {'class_attentions': class_attentions.tolist()}
        attention_file = 'model_path/pooled_feat/att_shots_%.0f_epoch_%.0f.json' % (shots, epoch)
        with open(attention_file,'w') as f:
            json.dump(attention_dict, f)
            
        # Repeat the extraction with the larger support set (shots_2 images).
        class_attentions = torch.zeros(20, shots_2, 2048).cuda()
        meta_iter = iter(metaloader_2)
        for i in range(shots_2):
            prndata, prncls = next(meta_iter)
            im_data_list = []
            im_info_list = []
            gt_boxes_list = []
            num_boxes_list = []
            im_data = torch.FloatTensor(1)
            if args.cuda:
                im_data = im_data.cuda()
            im_data = Variable(im_data, volatile=True)
            im_data.data.resize_(prndata.squeeze(0).size()).copy_(prndata.squeeze(0))
            im_data_list.append(im_data)
            attentions = fasterRCNN(im_data_list, im_info_list, gt_boxes_list, num_boxes_list,
                                            average_shot=True)
            # pdb.set_trace()
            for idx, cls in enumerate(prncls):
                # class_attentions[int(cls)].append(attentions[idx].data)
                class_attentions[int(cls), i, :] = attentions[idx].data

        attention_dict = {'class_attentions': class_attentions.tolist()}
        attention_file = 'model_path/pooled_feat/att_shots_%.0f_epoch_%.0f.json' % (shots_2, epoch)
        with open(attention_file,'w') as f:
            json.dump(attention_dict, f)
        
        
        # Reload the two JSON dumps and flatten to (20 * shots, 2048) arrays.
        file1 = 'model_path/pooled_feat/att_shots_%.0f_epoch_%.0f.json' % (shots, epoch)
        att_data1 = json.load(open(file1, 'r'))
        att_data1 = np.array(att_data1['class_attentions']).reshape(-1, 2048)
        file2 = 'model_path/pooled_feat/att_shots_%.0f_epoch_%.0f.json' % (shots_2, epoch)
        att_data2 = json.load(open(file2, 'r'))
        att_data2 = np.array(att_data2['class_attentions']).reshape(-1, 2048)
        
        # Project both attention sets to 2-D with t-SNE (can take minutes).
        t1 = time.time()
        print('tsne start')
        tsne1 = TSNE(n_components = 2).fit_transform(att_data1)
        t2 = time.time()
        print('tsne time used: %.2f' % float(t2 - t1))
        print('tsne start')
        tsne2 = TSNE(n_components = 2).fit_transform(att_data2)
        t3 = time.time()
        print('tsne time used: %.2f' % float(t3 - t2))
        
        # 21 colors, one per entry of imdb_classes (index 0 is unused below,
        # mirroring the background slot).
        color = ['#66FFFF',
        '#808080',
        '#00FFFF',
        '#7FFFD4',
        '#008000',
        '#90EE90',
        '#DC143C',
        '#FFA500',
        '#FFEBCD',
        '#0000FF',
        '#8A2BE2',
        '#A52A2A',
        '#DEB887',
        '#5F9EA0',
        '#FF7F50',
        '#D2691E',
        '#7FFF00',
        '#6495ED',
        '#000000',
        '#D3D3D3',
        '#FFD700']
        
        plt.cla()
        for cls in range(20):
    
            # Rows of tsne1 are grouped per class: shots consecutive rows each.
            x = tsne1[cls * shots : (cls + 1) * shots, 0]
            y = tsne1[cls * shots : (cls + 1) * shots, 1]
        
            # color[cls + 1] skips color[0], the background slot.
            plt.scatter(x, y, s=4, c = color[cls + 1], marker = '*')
        
        plt.legend(['aeroplane', 'bicycle', 'boat', 'bottle', 'car', 'cat', 'chair', 'diningtable', 'dog', 'horse', 'person', 'pottedplant', 'sheep', 'train', 'tvmonitor', 'bird', 'bus', 'cow', 'motorbike', 'sofa'], bbox_to_anchor=(0.95, 1))
        # NOTE(review): assumes the 'attentions/' directory already exists.
        plt.savefig('attentions/attention_tsne_result_%.0f_epoch_%.0f.jpg' % (shots, epoch), dpi=1000)
        # The shots_2 embedding is deliberately drawn on the same axes (no
        # plt.cla()) so the second savefig overlays both support-set sizes.
        for cls in range(20):
            x = tsne2[cls * shots_2 : (cls + 1) * shots_2, 0]
            y = tsne2[cls * shots_2 : (cls + 1) * shots_2, 1]
        
            plt.scatter(x, y, s=1, c = color[cls + 1], alpha=0.5)
        
        

              
        
        plt.savefig('attentions/attention_tsne_result_%.0f_%.0f_epoch_%.0f.jpg' % (shots, shots_2, epoch), dpi=1000)
        t4 = time.time()
        print('scatter plot time used: %.2f' % (t4 - t3))