import json
import os
import time
import random
import numpy as np
import matplotlib.image as mpimg

import io
import scipy.io as matio
import os
import os.path
from PIL import Image
import time
import re
import ipdb

import torch
import torch.utils.data
import torch.nn.parallel as para
from torch import nn, optim
from torch.autograd import Variable
from torch.nn import functional as F
from torchvision import datasets, transforms
from torchvision.utils import save_image
# from torch.nn import Paramete

import pickle

import _init_paths
import sys
import cv2
import imutils
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
import torchvision.datasets as dset
# from roi_data_layer.roidb import combined_roidb
from roi_data_layer.roibatchLoader import roibatchLoader
from model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from model.rpn.bbox_transform import clip_boxes
from model.nms.nms_wrapper import nms
from model.rpn.bbox_transform import bbox_transform_inv
from model.utils.net_utils import save_net, load_net, vis_detections
from model.utils.blob import im_list_to_blob
from model.faster_rcnn.vgg16 import vgg16
from model.faster_rcnn.resnet import resnet
import random
from random import choice

import imageio

# Python 3 removed xrange; alias it to range so the loops below run on both.
if sys.version_info[0] >= 3:
    xrange = range


# ----------------------------fasterrcnn------------------------------------------------------
def _get_image_blob(im):
    """Convert an image into a network input blob (image pyramid).

    Arguments:
        im (ndarray): a color image, channel order as expected by the
            detector (callers pass BGR).

    Returns:
        blob (ndarray): data blob with one entry per scale in
            cfg.TEST.SCALES, built by im_list_to_blob.
        im_scale_factors (ndarray): the resize factor used for each entry.
    """
    im_orig = im.astype(np.float32, copy=True)
    im_orig -= cfg.PIXEL_MEANS  # mean-subtract with the dataset pixel means

    im_shape = im_orig.shape
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])

    processed_ims = []
    im_scale_factors = []

    for target_size in cfg.TEST.SCALES:
        # Scale so the shorter side hits target_size, but cap the longer
        # side at cfg.TEST.MAX_SIZE to bound memory.
        im_scale = float(target_size) / float(im_size_min)
        if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
            im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
        im_resized = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
                                interpolation=cv2.INTER_LINEAR)
        im_scale_factors.append(im_scale)
        processed_ims.append(im_resized)
    blob = im_list_to_blob(processed_ims)
    return blob, np.array(im_scale_factors)


def fasterrcnn_load_caculate(lopath):
    """Build a VGG16-backbone Faster R-CNN for the 23 clothing classes and
    load trained weights from the checkpoint at *lopath*.

    Arguments:
        lopath (str): checkpoint path; the file must contain a dict with
            key 'model' (the state dict) and, optionally, 'pooling_mode'.

    Returns:
        The Faster R-CNN module with checkpoint weights loaded (left on
        CPU; the caller moves it to GPU as needed).
    """
    pascal_classes = np.asarray(['__background__',
                                 'short sleeve top', 'long sleeve top', 'short sleeve shirt', 'long sleeve shirt',
                                 'vest top', 'sling top', 'sleeveless top', 'short outwear', 'short vest',
                                 'long sleeve dress', 'short sleeve dress', 'sleeveless dress', 'long vest',
                                 'long outwear', 'bodysuit', 'classical',
                                 'short skirt', 'medium skirt', 'long skirt', 'shorts', 'medium shorts',
                                 'trousers', 'overalls'])
    fasterRCNN = vgg16(pascal_classes, pretrained=False, class_agnostic=False)
    fasterRCNN.create_architecture()
    checkpoint = torch.load(lopath)
    fasterRCNN.load_state_dict(checkpoint['model'])
    # Restore the ROI pooling mode the model was trained with, if recorded.
    if 'pooling_mode' in checkpoint:
        cfg.POOLING_MODE = checkpoint['pooling_mode']
    print('load model successfully!')
    return fasterRCNN


def fasterrcnn_embd(fasterrcnn, imagepath):
    """Run the detector on one image and return its single best detection.

    Arguments:
        fasterrcnn: a Faster R-CNN module with weights already loaded
            (e.g. from fasterrcnn_load_caculate). It is moved to GPU and
            put into eval mode here.
        imagepath (str): path of the image to process.

    Returns:
        (res_bbox, res_label): res_bbox is [x1, y1, x2, y2] (ints, in
        original-image coordinates) for the highest-scoring detection over
        all foreground classes; res_label is the matching class name.
        They stay ([], '') only if the score loop never fires.
    """
    pascal_classes = np.asarray(['__background__',
                                 'short sleeve top', 'long sleeve top', 'short sleeve shirt', 'long sleeve shirt',
                                 'vest top', 'sling top', 'sleeveless top', 'short outwear', 'short vest',
                                 'long sleeve dress', 'short sleeve dress', 'sleeveless dress', 'long vest',
                                 'long outwear', 'bodysuit', 'classical',
                                 'short skirt', 'medium skirt', 'long skirt', 'shorts', 'medium shorts',
                                 'trousers', 'overalls'])
    # Placeholder GPU tensors; resized and filled in below once the image
    # blob is known. (The deprecated Variable wrappers were no-ops and
    # have been dropped.)
    im_data = torch.FloatTensor(1).cuda()
    im_info = torch.FloatTensor(1).cuda()
    num_boxes = torch.LongTensor(1).cuda()
    gt_boxes = torch.FloatTensor(1).cuda()

    fasterrcnn.cuda()
    fasterrcnn.eval()

    with torch.no_grad():
        im_in = np.array(imageio.imread(imagepath))
        if len(im_in.shape) == 2:
            # Grayscale image: replicate the single channel into 3 channels.
            im_in = im_in[:, :, np.newaxis]
            im_in = np.concatenate((im_in, im_in, im_in), axis=2)
        # RGB -> BGR, the channel order the detector pipeline expects.
        im_in = im_in[:, :, ::-1]
        im = im_in

        blobs, im_scales = _get_image_blob(im)
        assert len(im_scales) == 1, "Only single-image batch implemented"
        im_blob = blobs
        im_info_np = np.array([[im_blob.shape[1], im_blob.shape[2], im_scales[0]]], dtype=np.float32)
        im_data_pt = torch.from_numpy(im_blob)
        im_data_pt = im_data_pt.permute(0, 3, 1, 2)  # NHWC -> NCHW
        im_info_pt = torch.from_numpy(im_info_np)
        im_data.resize_(im_data_pt.size()).copy_(im_data_pt)
        im_info.resize_(im_info_pt.size()).copy_(im_info_pt)
        gt_boxes.resize_(1, 1, 5).zero_()  # no ground truth at test time
        num_boxes.resize_(1).zero_()

        # pooled_feat:[300, 4096]; the loss outputs are unused at test time.
        rois, cls_prob, bbox_pred, \
        rpn_loss_cls, rpn_loss_box, \
        RCNN_loss_cls, RCNN_loss_bbox, \
        rois_label, pooled_feat = fasterrcnn(im_data, im_info, gt_boxes, num_boxes)

        scores = cls_prob.data
        boxes = rois.data[:, :, 1:5]
        # Un-normalize the regression deltas (hard-coded bbox normalize
        # stds/means — presumably matching the training cfg; verify there).
        box_deltas = bbox_pred.data
        box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor([0.1, 0.1, 0.2, 0.2]).cuda() \
                     + torch.FloatTensor([0.0, 0.0, 0.0, 0.0]).cuda()
        box_deltas = box_deltas.view(1, -1, 4 * len(pascal_classes))
        pred_boxes = bbox_transform_inv(boxes, box_deltas, 1)
        pred_boxes = clip_boxes(pred_boxes, im_info.data, 1)
        # Map boxes back from the resized blob to original-image coordinates.
        pred_boxes /= im_scales[0]

        scores = scores.squeeze()
        pred_boxes = pred_boxes.squeeze()

        res_sco = 0.0
        res_label = ''
        res_bbox = []

        # Keep the best-scoring proposal over all foreground classes
        # (>= means a later class wins ties, as in the original logic).
        for j in range(1, len(pascal_classes)):
            max_s_score = float(max(scores[:, j]))
            if max_s_score >= res_sco:
                res_sco = max_s_score
                ind_id = int(torch.argmax(scores[:, j]))  # index of the top score
                dic_coent_bbox = pred_boxes[ind_id][j * 4:(j + 1) * 4]
                res_bbox = [int(dic_coent_bbox[0]), int(dic_coent_bbox[1]), int(dic_coent_bbox[2]),
                            int(dic_coent_bbox[3])]
                res_label = pascal_classes[j]

    return res_bbox, res_label


# box = [80, 300, 500, 800]
# labels = ["short sleeve top", "long sleeve top", "short sleeve shirt", "long sleeve shirt",
#           "vest top", "sling top", "sleeveless top", "short outwear", "short vest",
#           "long sleeve dress", "short sleeve dress", "sleeveless dress", "long vest",
#           "long outwear", "bodysuit", "classical", "short skirt", "medium skirt", "long skirt",
#           "shorts", "medium shorts", "trousers", "overalls"]

# image_path = "/tcdata/test_dataset_B/image/"
# ---------------------------------------------------------------------------
# Inference driver: for every test video, pick one candidate image gallery
# (pre-ranked indices from candidate_image_index_list_p101.npy), run the
# detector on every image in that gallery, and dump all detections to
# result.json in the submission format.
# ---------------------------------------------------------------------------
image_path = "/tcdata/test_dataset_B/image/"
CUDA = 1

allids_video = os.listdir('/myspace/testVideoTranPic')
allids_image = os.listdir('/tcdata/test_dataset_B/image')

submit = {}

# predict_data: dict mapping video_index -> ranked candidate image indices
# (at least 100 per video) produced by the retrieval stage.
predict_data = np.load("candidate_image_index_list_p101.npy", allow_pickle=True).item()
start_time = time.time()
fasterrcnn_image_test = fasterrcnn_load_caculate('/myspace/res_model_fasterrcnn_image/faster_rcnn_1_7_10263.pth')
if CUDA:
    fasterrcnn_image_test = fasterrcnn_image_test.cuda()

for video_index in range(len(predict_data)):
    # Pick one of the top-100 candidate galleries at random for this video.
    image_index = predict_data.get(video_index)[np.random.randint(100, size=1).item()].item()
    item_id = allids_image[image_index]
    image_file = os.listdir(os.path.join(image_path, item_id))

    video_id = allids_video[video_index]
    submit[video_id] = {'item_id': item_id, 'result': []}

    result = []
    for image_file_index, _ in enumerate(image_file):
        img_pa = os.path.join(image_path, item_id, image_file[image_file_index])
        box, label = fasterrcnn_embd(fasterrcnn_image_test, img_pa)
        result.append({
            'img_name': str(image_file_index),
            'box': box,
            'label': label
        })

    submit[video_id]['result'] = result
    # Overwrite a small progress file every 100 videos so the long run
    # can be monitored from outside.
    if (video_index % 100 == 0):
        with open('test_sub.txt', 'w') as file:
            file.write("0-10000 | index:{} | time:{}".format(
                video_index, round((time.time() - start_time), 4)
            ))

# Write the final submission; context manager guarantees the file is closed.
with open('result.json', 'w') as fileObject:
    fileObject.write(json.dumps(submit, ensure_ascii=False))
