import _init_paths
import sys
import cv2
import imutils
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
import torchvision.datasets as dset
from model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from model.rpn.bbox_transform import clip_boxes
from model.rpn.bbox_transform import bbox_transform_inv
from model.utils.blob import im_list_to_blob
from model.faster_rcnn.vgg16 import vgg16
from model.faster_rcnn.resnet import resnet
import numpy as np
import torch
from torch.autograd import Variable
import imageio
import os
from random import choice

# Python 2/3 compatibility: expose ``xrange`` everywhere (it is a builtin
# on Python 2 and simply an alias of ``range`` on Python 3).
if sys.version_info[0] >= 3:
    xrange = range


# ----------------------------fasterrcnn------------------------------------------------------
def _get_image_blob(im):
    """Convert an image into a network input blob at the configured test scales.

    Args:
        im: image as an ndarray (H, W, 3); channel order must match
            ``cfg.PIXEL_MEANS`` (presumably BGR — confirm against caller).

    Returns:
        Tuple ``(blob, scales)`` where ``blob`` is the stacked, mean-subtracted,
        rescaled image batch produced by ``im_list_to_blob`` and ``scales`` is an
        ndarray with the scale factor used for each entry of ``cfg.TEST.SCALES``.
    """
    im_orig = im.astype(np.float32, copy=True)
    im_orig -= cfg.PIXEL_MEANS  # per-channel mean subtraction

    im_shape = im_orig.shape
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])

    processed_ims = []
    im_scale_factors = []

    for target_size in cfg.TEST.SCALES:
        # Scale the short side to target_size, but cap the long side at MAX_SIZE.
        im_scale = float(target_size) / float(im_size_min)
        if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
            im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
        # Use a fresh name instead of rebinding the parameter ``im``.
        resized = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
                             interpolation=cv2.INTER_LINEAR)
        im_scale_factors.append(im_scale)
        processed_ims.append(resized)

    blob = im_list_to_blob(processed_ims)
    return blob, np.array(im_scale_factors)


def fasterrcnn_load_caculate(lopath):
    """Build a VGG16-based Faster R-CNN for the clothing classes and load
    trained weights from a checkpoint file.

    Args:
        lopath: checkpoint path. The checkpoint is expected to be a dict with
            a ``'model'`` state dict and optionally a ``'pooling_mode'`` entry.

    Returns:
        The constructed model with weights loaded (device/mode unchanged;
        callers such as ``fasterrcnn_embd`` move it to GPU and set eval mode).
    """
    pascal_classes = np.asarray(['__background__',
                                 'short sleeve top', 'long sleeve top', 'short sleeve shirt', 'long sleeve shirt',
                                 'vest top', 'sling top', 'sleeveless top', 'short outwear', 'short vest',
                                 'long sleeve dress', 'short sleeve dress', 'sleeveless dress', 'long vest',
                                 'long outwear', 'bodysuit', 'classical',
                                 'short skirt', 'medium skirt', 'long skirt', 'shorts', 'medium shorts',
                                 'trousers', 'overalls'])
    fasterRCNN = vgg16(pascal_classes, pretrained=False, class_agnostic=False)
    fasterRCNN.create_architecture()
    checkpoint = torch.load(lopath)
    fasterRCNN.load_state_dict(checkpoint['model'])
    # Keep the RoI pooling mode consistent with how the checkpoint was trained.
    if 'pooling_mode' in checkpoint:
        cfg.POOLING_MODE = checkpoint['pooling_mode']
    print('load model successfully!')
    return fasterRCNN


def fasterrcnn_embd(fasterrcnn, imagepath, CUDA=True):
    """Embed one image with Faster R-CNN: return the 4096-d pooled feature of
    the region whose classification score is highest over all foreground classes.

    Args:
        fasterrcnn: network returned by ``fasterrcnn_load_caculate``.
        imagepath: path of the image file to embed.
        CUDA: run on GPU when True, on CPU when False.

    Returns:
        1-D tensor of length 4096. All zeros only if no class score reaches
        0.0 (scores are probabilities, so a region is always selected in practice).
    """
    # Fallback embedding if, improbably, nothing scores >= 0.0.
    res_embd = torch.zeros(4096)
    pascal_classes = np.asarray(['__background__',
                                 'short sleeve top', 'long sleeve top', 'short sleeve shirt', 'long sleeve shirt',
                                 'vest top', 'sling top', 'sleeveless top', 'short outwear', 'short vest',
                                 'long sleeve dress', 'short sleeve dress', 'sleeveless dress', 'long vest',
                                 'long outwear', 'bodysuit', 'classical',
                                 'short skirt', 'medium skirt', 'long skirt', 'shorts', 'medium shorts',
                                 'trousers', 'overalls'])

    device = torch.device('cuda' if CUDA else 'cpu')
    fasterrcnn.to(device)
    # Bug fix: eval() used to run only when CUDA was True, leaving the model
    # in training mode when called with CUDA=False.
    fasterrcnn.eval()

    with torch.no_grad():
        im_in = np.array(imageio.imread(imagepath))
        if len(im_in.shape) == 2:
            # Grayscale image: replicate the single channel to 3 channels.
            im_in = im_in[:, :, np.newaxis]
            im_in = np.concatenate((im_in, im_in, im_in), axis=2)
        im = im_in[:, :, ::-1]  # RGB -> BGR, the order the detector pipeline expects

        blobs, im_scales = _get_image_blob(im)
        assert len(im_scales) == 1, "Only single-image batch implemented"
        im_info_np = np.array([[blobs.shape[1], blobs.shape[2], im_scales[0]]],
                              dtype=np.float32)

        # Build the four network inputs directly on the target device
        # (replaces the old Variable + resize_()/copy_() dance on
        # uninitialized tensors).
        im_data = torch.from_numpy(blobs).permute(0, 3, 1, 2).to(device)
        im_info = torch.from_numpy(im_info_np).to(device)
        gt_boxes = torch.zeros(1, 1, 5, device=device)
        num_boxes = torch.zeros(1, dtype=torch.long, device=device)

        # pooled_feat: [300, 4096] — one feature vector per RoI.
        rois, cls_prob, bbox_pred, \
        rpn_loss_cls, rpn_loss_box, \
        RCNN_loss_cls, RCNN_loss_bbox, \
        rois_label, pooled_feat = fasterrcnn(im_data, im_info, gt_boxes, num_boxes)

        # NOTE: the original also decoded the predicted boxes via
        # bbox_transform_inv/clip_boxes but never used them here — and that
        # dead code crashed on CPU because its delta tensors were hard-coded
        # .cuda(). Only the class scores are needed to pick the embedding.
        scores = cls_prob.squeeze()  # [num_rois, num_classes]

        res_sco = 0.0
        for j in range(1, len(pascal_classes)):  # skip '__background__'
            max_s_score = float(scores[:, j].max())
            if max_s_score >= res_sco:
                res_sco = max_s_score
                ind_id = int(torch.argmax(scores[:, j]))  # top-scoring RoI index
                res_embd = pooled_feat[ind_id]

    return res_embd


# ----------------------------fasterrcnn------------------------------------------------------

def randomImagePath(image_id, root='/data/train/image/'):
    """Return the path of a randomly chosen ``.jpg`` inside ``<root>/<image_id>``.

    Args:
        image_id: sub-directory name holding the item's images.
        root: base image directory; the default preserves the original
            hard-coded location.

    Returns:
        Full path of one randomly selected ``.jpg`` file.

    Raises:
        IndexError: if the directory contains no ``.jpg`` files.
    """
    image_dir = os.path.join(root, image_id)
    jpg_names = [name for name in os.listdir(image_dir) if name.endswith('.jpg')]
    return os.path.join(image_dir, choice(jpg_names))


def top_match(rec_scores, item_ids, k):
    """Pick the k highest-scoring items and return their ids.

    Args:
        rec_scores: 1-D tensor of recommendation scores (may be on GPU /
            require grad; it is detached and moved to CPU first).
        item_ids: nested sequence of ids aligned with ``rec_scores``
            (squeezed to 1-D before indexing).
        k: number of top items to return.

    Returns:
        1-D tensor of the k item ids, best score first.
    """
    scores = rec_scores.detach().cpu().numpy()
    # Indices of the k largest scores; negating sorts in descending order.
    top_idx = np.argsort(-scores)[:k]
    ids = torch.tensor(item_ids).squeeze()
    return ids[top_idx]


def my_repeat(text, number_neg_sample_train):
    """Repeat each element of ``text`` consecutively ``number_neg_sample_train``
    times, e.g. [1, 2] with n=3 -> [1, 1, 1, 2, 2, 2].

    Args:
        text: 1-D tensor (moved to CPU before repeating).
        number_neg_sample_train: repetition count per element.

    Returns:
        CPU tensor of length ``len(text) * number_neg_sample_train``.
    """
    # repeat_interleave is the single-call equivalent of the previous
    # O(n^2) torch.cat-in-a-loop, and preserves the input dtype instead of
    # forcing a long accumulator.
    return text.cpu().repeat_interleave(number_neg_sample_train)
