# coding=utf-8
import io
import scipy.io as matio
import os
import os.path
import numpy as np
from PIL import Image
import time
import re
import ipdb


import torch
import torch.utils.data
import torch.nn.parallel as para
from torch import nn, optim
from torch.autograd import Variable
from torch.nn import functional as F
from torchvision import datasets, transforms
from torchvision.utils import save_image
from torch.nn import Parameter
from MF import FM as rec_model

import pickle

import _init_paths
import sys
import cv2
import imutils
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
import torchvision.datasets as dset
# from roi_data_layer.roidb import combined_roidb
from roi_data_layer.roibatchLoader import roibatchLoader
from model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from model.rpn.bbox_transform import clip_boxes
from model.nms.nms_wrapper import nms
from model.rpn.bbox_transform import bbox_transform_inv
from model.utils.net_utils import save_net, load_net, vis_detections
from model.utils.blob import im_list_to_blob
from model.faster_rcnn.vgg16 import vgg16
from model.faster_rcnn.resnet import resnet
import random
from random import choice

import imageio

try:
    xrange          # Python 2
except NameError:
    xrange = range  # Python 3

# --Path settings----------------------------------------------------------------------------------
root_path = ''


# --Create dataset----------------------------------------------------------------------------------

# video_text_data (50000*121)
# 50000 video speeches ; the max num of the words is 121
# the max file: 098276.txt
# image_text_data (50000*22)
# 50000 image titles ; the max num of the words is 22
# the max file: 047927.txt,043484.txt
# pkl length:78230 width:100
# video_text_data and image_text_data have adjusted the order of the file

# --manual setting----------------------------------------------------------------------------------
# changed configuration to this instead of argparse for easier interaction
CUDA = 1  # 1 for True; 0 for False
SEED = 1
LOG_INTERVAL = 10

# define some variables
video_num = 50000  #50000
image_num = video_num
video_width = 118  # token width used for video text — NOTE(review): header note above says max is 121; confirm
image_width = 23   # token width used for image titles (max words 22 per note above)


# NOTE(review): os.listdir() order is OS-dependent; downstream code indexes
# these lists by position — confirm consumers tolerate or fix the ordering.
allids_video = os.listdir('/myspace/testVideoTranPic')
allids_image = os.listdir('/tcdata/test_dataset_B/image')

# load user dict (10000 test entries)
# video_texts = torch.from_numpy(np.load('video_text_data_test.npy'))
# image_texts = torch.from_numpy(np.load("image_text_data_test.npy"))

textEmbeddings_length = 78230  # vocabulary size of the pre-trained text embeddings
textEmbeddings_width = 100     # embedding dimension
k = 10  # top-k items for rec
number_neg_sample_train = 99  # 5



# --load pre-trained embeddings for text-------------------------------------------------------------

# dict_data is a dictionary type
# path = "all_index2vec_dict.pkl"
# with open(path, 'rb') as fo:
#     dict_data = pickle.load(fo, encoding='bytes')

# textEmbeddings = np.zeros([textEmbeddings_length + 1, textEmbeddings_width])
# index = sorted(dict_data.keys())
# for i in range(textEmbeddings_length + 1):
#     for j in range(textEmbeddings_width):
#         if i != 0:
#             textEmbeddings[i][j] = dict_data[index[i - 1]][j]

# embedding_len = textEmbeddings.shape[1]
BATCH_SIZE = 100
# number_sample_eval = BATCH_SIZE*(number_neg_sample_train + 1)-1
number_sample_eval = 9999  # negative samples per evaluation query (see compute_auc)
learning_rate = 4e-3
decay = 4
# EPOCHS = decay * 2 + 1
EPOCHS = 1
lr_decay_rate = 0.5

# --data loader----------------------------------------------------------------------------------
torch.manual_seed(SEED)
if CUDA:
    torch.cuda.manual_seed(SEED)

# DataLoader instances
kwargs = {'num_workers': 4, 'pin_memory': True} if CUDA else {}

#----------------------------fasterrcnn------------------------------------------------------
def _get_image_blob(im):
    """Convert an image into a network input blob (an image pyramid).

    Args:
        im (ndarray): a color image, HxWxC, channel order expected by the
            detector (mean subtraction uses cfg.PIXEL_MEANS).

    Returns:
        blob (ndarray): data blob holding every pyramid level.
        im_scale_factors (ndarray): the scale applied at each level.
    """
    im_orig = im.astype(np.float32, copy=True)
    # Subtract the per-channel pixel means configured for the detector.
    im_orig -= cfg.PIXEL_MEANS

    im_shape = im_orig.shape
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])

    processed_ims = []
    im_scale_factors = []

    for target_size in cfg.TEST.SCALES:
        # Scale the short side to target_size, but cap the long side at
        # cfg.TEST.MAX_SIZE.
        im_scale = float(target_size) / float(im_size_min)
        if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
            im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
        im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
                interpolation=cv2.INTER_LINEAR)
        im_scale_factors.append(im_scale)
        processed_ims.append(im)

    # Stack all pyramid levels into a single blob.
    blob = im_list_to_blob(processed_ims)
    return blob, np.array(im_scale_factors)

def fasterrcnn_load_caculate(lopath):
    """Build a VGG16 Faster R-CNN for the clothing classes and load a checkpoint.

    Args:
        lopath: path to a saved checkpoint containing at least a 'model'
            state dict (and optionally a 'pooling_mode').

    Returns:
        The constructed detector with the checkpoint weights loaded.
    """
    class_names = np.asarray([
        '__background__',
        'short sleeve top', 'long sleeve top', 'short sleeve shirt', 'long sleeve shirt',
        'vest top', 'sling top', 'sleeveless top', 'short outwear', 'short vest',
        'long sleeve dress', 'short sleeve dress', 'sleeveless dress', 'long vest',
        'long outwear', 'bodysuit', 'classical',
        'short skirt', 'medium skirt', 'long skirt', 'shorts', 'medium shorts',
        'trousers', 'overalls',
    ])
    detector = vgg16(class_names, pretrained=False, class_agnostic=False)
    detector.create_architecture()

    checkpoint = torch.load(lopath)
    detector.load_state_dict(checkpoint['model'])
    # Restore the pooling mode the checkpoint was trained with, if recorded.
    if 'pooling_mode' in checkpoint:
        cfg.POOLING_MODE = checkpoint['pooling_mode']

    print('load model successfully!')
    return detector

def fasterrcnn_embd(fasterrcnn, imagepath):
    """Embed one image: run the detector and return the 4096-d pooled
    feature of the overall highest-scoring foreground detection.

    Args:
        fasterrcnn: a loaded Faster R-CNN; moved to CUDA and set to eval here.
        imagepath: path of the image file to embed.

    Returns:
        torch.Tensor of shape (4096,); stays all-zero only if no class score
        ever reaches the initial threshold of 0.0.
    """
    res_embd = torch.zeros(4096)
    # Only the length of this class list is used below (to reshape box deltas);
    # it must match the classes the checkpoint was trained on.
    pascal_classes = np.asarray(['__background__',
                           'short sleeve top', 'long sleeve top', 'short sleeve shirt', 'long sleeve shirt',
                           'vest top', 'sling top', 'sleeveless top', 'short outwear', 'short vest',
                           'long sleeve dress', 'short sleeve dress', 'sleeveless dress', 'long vest',
                           'long outwear', 'bodysuit', 'classical',
                           'short skirt', 'medium skirt', 'long skirt', 'shorts', 'medium shorts',
                            'trousers', 'overalls'])
    # Placeholder tensors; resized in place to the real shapes further below.
    im_data = torch.FloatTensor(1)
    im_info = torch.FloatTensor(1)
    num_boxes = torch.LongTensor(1)
    gt_boxes = torch.FloatTensor(1)

    im_data = im_data.cuda()
    im_info = im_info.cuda()
    num_boxes = num_boxes.cuda()
    gt_boxes = gt_boxes.cuda()

    # Legacy Variable wrapping (a no-op on modern PyTorch) under no_grad.
    with torch.no_grad():
        im_data = Variable(im_data)
        im_info = Variable(im_info)
        num_boxes = Variable(num_boxes)
        gt_boxes = Variable(gt_boxes)

    fasterrcnn.cuda()
    fasterrcnn.eval()

    with torch.no_grad():
        im_in = np.array(imageio.imread(imagepath))
        # Grayscale -> 3 channels by stacking the single plane.
        if len(im_in.shape) == 2:
            im_in = im_in[:,:,np.newaxis]
            im_in = np.concatenate((im_in,im_in,im_in), axis=2)
        # Reverse channel order — presumably RGB (imageio) -> BGR (detector); confirm.
        im_in = im_in[:,:,::-1]
        im = im_in

        blobs, im_scales = _get_image_blob(im)
        assert len(im_scales) == 1, "Only single-image batch implemented"
        im_blob = blobs
        im_info_np = np.array([[im_blob.shape[1], im_blob.shape[2], im_scales[0]]], dtype=np.float32)
        im_data_pt = torch.from_numpy(im_blob)
        # NHWC -> NCHW for the network.
        im_data_pt = im_data_pt.permute(0, 3, 1, 2)
        im_info_pt = torch.from_numpy(im_info_np)
        im_data.resize_(im_data_pt.size()).copy_(im_data_pt)
        im_info.resize_(im_info_pt.size()).copy_(im_info_pt)
        # No ground-truth boxes at test time.
        gt_boxes.resize_(1, 1, 5).zero_()
        num_boxes.resize_(1).zero_()

        # pooled_feat:[300, 4096]
        rois, cls_prob, bbox_pred, \
            rpn_loss_cls, rpn_loss_box, \
            RCNN_loss_cls, RCNN_loss_bbox, \
            rois_label,pooled_feat= fasterrcnn(im_data, im_info, gt_boxes, num_boxes)

        scores = cls_prob.data
        boxes = rois.data[:, :, 1:5]
        box_deltas = bbox_pred.data
        # Un-normalize regression deltas; the constants presumably mirror
        # cfg.TRAIN.BBOX_NORMALIZE_STDS / _MEANS used at training time — confirm.
        box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor([0.1, 0.1, 0.2, 0.2]).cuda() \
                                     + torch.FloatTensor([0.0, 0.0, 0.0, 0.0]).cuda()
        box_deltas = box_deltas.view(1, -1, 4 * len(pascal_classes))
        pred_boxes = bbox_transform_inv(boxes, box_deltas, 1)
        pred_boxes = clip_boxes(pred_boxes, im_info.data, 1)
        # Map boxes back to original-image coordinates.
        pred_boxes /= im_scales[0]

        scores = scores.squeeze()
        pred_boxes = pred_boxes.squeeze()

        res_sco = 0.0

        # Keep the pooled feature of the single best-scoring detection over all
        # foreground classes ('>=' means a later class wins score ties).
        for j in xrange(1, len(pascal_classes)):
            max_s_score = float(max(scores[:,j]))
            if max_s_score >= res_sco:
                res_sco = max_s_score
                ind_id = int(torch.argmax(scores[:,j]))  # index of the max score
                res_embd = pooled_feat[ind_id]

    return res_embd

#----------------------------fasterrcnn------------------------------------------------------   


# --model setup----------------------------------------------------------------------------------

class myModel(nn.Module):
    """Container that owns two Faster R-CNN feature extractors and the
    factorization-machine recommendation model. `forward` currently
    delegates to the rec model only; the detector-based embedding branch
    is commented out.
    """

    def __init__(self):
        super(myModel, self).__init__()
        # NOTE(review): both detectors load the same checkpoint and are never
        # used in forward() — they only serve the commented-out embedding
        # branch below; confirm whether they can be dropped to save memory.
        self.video_fasterrcnn = fasterrcnn_load_caculate('/myspace/res_model_fasterrcnn_image/faster_rcnn_1_7_10263.pth')
        self.image_fasterrcnn = fasterrcnn_load_caculate('/myspace/res_model_fasterrcnn_image/faster_rcnn_1_7_10263.pth')
        
        self.rec_model = rec_model(textEmbeddings_width, video_num, image_num)

    def forward(self, video_id, sample_ids, video_text, image_list_test):
        """Score the candidate items in `sample_ids` for the given video.

        All arguments are forwarded unchanged to the underlying FM model.
        """
#         videoEmbs = torch.zeros(2, 4096)
#         imageEmbs = torch.zeros(2, 4096)
        
#         videoEmbs[0] = fasterrcnn_embd(self.video_fasterrcnn, video_path)
#         videoEmbs[1] = videoEmbs[0]
        
#         imageEmbs[0] = fasterrcnn_embd(self.image_fasterrcnn, image_path)
#         imageEmbs[1] = fasterrcnn_embd(self.image_fasterrcnn, image_neg_path)
#         if CUDA:
#             videoEmbs = videoEmbs.cuda()
#             imageEmbs = imageEmbs.cuda()

        y = self.rec_model(video_id, sample_ids, video_text, image_list_test)
        return y

# optimizer
# optimizer = optim.Adam(model.parameters(), weight_decay=1e-5, lr=learning_rate)
# -- optimizer -----------------------------------------------------------
def get_optim(model):
    """Create an Adagrad optimizer over every parameter except the
    (frozen) data embeddings.

    Uses the module-level `learning_rate` constant.
    """
    trainable = [
        param
        for name, param in model.named_parameters()
        if not name.startswith('data_embeddings')
    ]  # len:62

    return optim.Adagrad([{'params': trainable}],
                         lr=learning_rate, weight_decay=1e-5)


# -- Loss ------------------------------------------------------------------
def loss_function(pos_scores, neg_scores):
    """BPR (Bayesian Personalized Ranking) loss plus a batch AUC estimate.

    Args:
        pos_scores: tensor of scores for positive (ground-truth) items.
        neg_scores: tensor of scores for negative samples, same shape.

    Returns:
        (mean BPR loss tensor, AUC as a Python float) where AUC is the
        fraction of pairs ranked correctly (positive strictly above negative).
    """
    difference = pos_scores - neg_scores
    # BPR: maximize the log-sigmoid of the positive/negative margin.
    bpr_loss = - F.logsigmoid(difference)
    total_len = len(difference)
    # Count hits with torch ops: the previous np.where(...) fails on tensors
    # that require grad and on CUDA tensors.
    auc = int((difference > 0).sum()) / float(total_len)
    return torch.mean(bpr_loss), auc


# -- performance compute -------------------------------------------------------------
def top_match(rec_scores, item_ids, k):
    """Return the ids of the k highest-scoring items, best first.

    Args:
        rec_scores: 1-D tensor of scores, aligned with `item_ids`.
        item_ids: sequence of candidate item ids.
        k: number of top items to return.

    Returns:
        LongTensor of the k item ids with the highest scores.
    """
    scores_np = rec_scores.detach().cpu().numpy()
    # Ascending argsort of the negated scores == descending by score.
    top_positions = np.argsort(-scores_np)[:k]
    ids = torch.tensor(item_ids).squeeze()
    return ids[top_positions]



def my_repeat(text, number_neg_sample_train):
    """Repeat every entry of `text` `number_neg_sample_train` times, in order.

    e.g. tensor([1, 2]) with count 3 -> tensor([1, 1, 1, 2, 2, 2]).

    Args:
        text: 1-D tensor of indices (moved to CPU first).
        number_neg_sample_train: repetition count per entry.

    Returns:
        1-D CPU tensor of length len(text) * number_neg_sample_train.
    """
    text = text.cpu()
    # Collect chunks and concatenate once: the previous torch.cat inside the
    # loop re-copied the accumulator each iteration (quadratic total work).
    chunks = [torch.empty(0, dtype=torch.long)]
    chunks.extend(entry.repeat(number_neg_sample_train) for entry in text)
    return torch.cat(chunks)

# -- lr ------------------------------------------------------------------
def lr_scheduler(optimizer, init_lr, epoch, lr_decay_iter):
    """Multiply the learning rate by the module-level `lr_decay_rate` on
    every epoch divisible by `lr_decay_iter` (including epoch 0); otherwise
    leave it unchanged.

    Returns the (possibly updated) learning rate.
    """
    # Non-zero remainder -> not a decay epoch; keep the current lr.
    if epoch % lr_decay_iter:
        return init_lr

    # Decay: lr = init_lr * lr_decay_rate (0.5 in this file's settings).
    lr = init_lr * lr_decay_rate
    # NOTE(review): only param_groups[0] is updated; fine while get_optim
    # builds a single param group — confirm if more groups are ever added.
    optimizer.param_groups[0]['lr'] = lr

    return lr


# update the model by the pre-trained parameters for myModel -------------------
def get_updateModel(model_path, target=None):
    """Load pre-trained weights into `target` (the module-level `model` by default).

    Only parameters whose names also exist in the current model are copied,
    so a partially matching checkpoint loads without error.

    Args:
        model_path: path of a saved state_dict (loaded onto CPU).
        target: nn.Module to update; defaults to the global `model` for
            backward compatibility with existing call sites.
    """
    net = model if target is None else target
    pretrained_state = torch.load(model_path, map_location='cpu')
    current_state = net.state_dict()
    # Keep only the keys shared by both state dicts.
    shared = {k: v for k, v in pretrained_state.items() if k in current_state}
    current_state.update(shared)
    net.load_state_dict(current_state)


# -- val ------------------------------------------------------------------
def sampler(number_sample_eval, index):
    """Build an evaluation candidate list: the ground-truth item id(s) in
    `index` followed by `number_sample_eval` negative item ids sampled
    without replacement from all items not in `index`.

    NOTE(review): relies on the module global `image_texts`, whose loader is
    commented out near the top of this file — calling this as-is raises
    NameError; the test path below never calls it. Confirm before reuse.
    """
    # get groundtruth items
    gt_item_ids = index
    neg_samples_index = []

    image_texts_index = [id for id in range(len(image_texts))]
    for i in range(1):
        # get negative samples
        # filter samples
        samples_to_filter = index

        sample_pool = np.setdiff1d(image_texts_index, samples_to_filter)

        # sample neg samples from the pool
        neg_sample_ids = np.random.choice(sample_pool, number_sample_eval, replace=False)  # shape(500,)
        for j in range(number_sample_eval):
            neg_samples_index.append(neg_sample_ids[j].item())

    # concate gt and neg samples
    gt_item_ids = torch.from_numpy(np.array(gt_item_ids)).unsqueeze(0)  # torch.Size([1])
    neg_sample_ids = torch.tensor(neg_samples_index).squeeze()
    # neg_sample_ids = torch.from_numpy(neg_samples_index)  # torch.Size([500])

    sample_ids = torch.cat([gt_item_ids, neg_sample_ids])  # torch.Size([501])

    # import ipdb; ipdb.set_trace()
    return sample_ids


# -- calculate the auc ------------------------------------------------------------------
def compute_auc(scores, number_neg=None):
    """AUC for one ranking: the fraction of (positive, negative) score pairs
    in which the positive item strictly outscores the negative sample.

    Args:
        scores: 1-D tensor whose leading entries are positive-item scores and
            whose last `number_neg` entries are negative-sample scores.
        number_neg: number of trailing negative scores; defaults to the
            module-level `number_sample_eval` (backward compatible).

    Returns:
        AUC in [0, 1] as a Python float.
    """
    if number_neg is None:
        number_neg = number_sample_eval
    scores = scores.cpu()
    num_pos = len(scores) - number_neg
    score_neg = scores[num_pos:]
    num_hit = 0

    for i in range(num_pos):
        # Count negatives strictly below this positive's score (torch ops
        # instead of np.where, so grad-requiring tensors also work).
        num_hit += int((score_neg < scores[i]).sum())

    return num_hit / (num_pos * number_neg)



# p,r,f,ndcg =\
#         val(user_list_val, interaction_indicator_val, interaction_indicator_train, valid_item_entries_train, item_pop_train)
# #import ipdb; ipdb.set_trace()

# -- test ------------------------------------------------------------------


def get_len_of_nonzero(texts):
    """Per row, return the position of the last token before zero padding.

    Rules per row: 0 when the row starts with a zero; the last index when
    the row has no trailing zero; otherwise (index of first zero) - 1.

    Args:
        texts: 2-D tensor of token ids, zero-padded on the right.

    Returns:
        1-D LongTensor of per-row lengths.
    """
    lengths = torch.zeros(len(texts), dtype=torch.long)
    for row_idx, row in enumerate(texts):
        last = row.shape[0] - 1
        if row[0] == 0:
            lengths[row_idx] = 0
        elif row[last] != 0:
            lengths[row_idx] = last
        else:
            lengths[row_idx] = row.tolist().index(0) - 1
    return lengths


def arr_size(arr, size):
    """Split `arr` into consecutive chunks of at most `size` elements.

    The last chunk may be shorter; an empty input yields an empty list.

    Args:
        arr: list (or other sliceable sequence) to split.
        size: positive chunk length.

    Returns:
        List of chunks in original order.
    """
    # Stepping by `size` up to len(arr) covers every element exactly once;
    # the previous version iterated one step too far (len(arr) + 1) and then
    # filtered out the resulting empty slice by truthiness.
    return [arr[i:i + size] for i in range(0, len(arr), size)]

# video index -> tensor holding the top-1 matching image index (filled by test())
candidate_image_index_list = {}

def test(video_list_test, image_list_test):
    """For every video, score all candidate images with the model and record
    the top-1 image index in the module-level `candidate_image_index_list`.

    Args:
        video_list_test: per-video features, indexable by a LongTensor of ids.
        image_list_test: per-image features, passed to the model unchanged.
    """
    print('Start test...')
    model.eval()
    # Candidate pool: every image index, in allids_image order.
    test_id_1 = [id for id in range(len(allids_image))]
    test_id = test_id_1
    test_index = test_id
    sample_ids = torch.tensor(test_index).squeeze()

    if CUDA:
        sample_ids = sample_ids.cuda()

    # NOTE(review): this local is never used — the heartbeat below reads the
    # module-level `start_time` set just before test() is called.
    started_time = time.time()


    for index in range(len(allids_video)):
        # create the user input
        video_id = torch.from_numpy(np.array(index)).unsqueeze(0)
        # NOTE(review): the id is repeated len(allids_video) times — should
        # this be len(allids_image) (one per candidate)? confirm against MF.FM.
        video_id = my_repeat(video_id, len(allids_video))
        video_text = video_list_test[video_id]

        if CUDA:
            video_text = video_text.cuda()
            video_id = video_id.cuda()
#         ipdb.set_trace()

        scores = model(video_id, sample_ids, video_text, image_list_test)

        # Keep only the first 10000 candidate scores before ranking.
        scores = scores[:10000]
        candidate_image_index_list[index] = top_match(scores, test_index, 1)

        # Progress heartbeat: overwrite the same one-line status file every
        # 100 processed videos.
        if ((index+1) % 100 == 0):
            print(str(index+1))
            with open('test_p101.txt', 'w') as file:
                file.write("0-10000 | index:{} | time:{}".format(
                    index, round((time.time() - start_time), 4)
                ))


# -- training process ----------------------------------------------------------------

best_loss = 0  # NOTE(review): never read in this script

model = myModel()
# model = nn.DataParallel(model)
if CUDA:
    model = model.cuda()

# Restore the pre-trained recommendation weights (shared keys only).
model_path = '/myspace/res_FM/model-2.pt'
get_updateModel(model_path)

# optimizer = get_optim(model)
print('model created...')

#---------- compute image embeddings ------------------------------
imageEmbs = torch.zeros(len(allids_image), 4096)
fasterrcnn_image_test = fasterrcnn_load_caculate('/myspace/res_model_fasterrcnn_image/faster_rcnn_1_7_10263.pth')
if CUDA:
    fasterrcnn_image_test = fasterrcnn_image_test.cuda()
for i in range(len(allids_image)):
    print('done:' + str(i))
    image_path = '/tcdata/test_dataset_B/image/' + allids_image[i]
    image_names = os.listdir(image_path)
    # Embed one randomly chosen image from each item's folder.
    i_path = image_path + '/' + choice(image_names)
    imageEmbs[i] = fasterrcnn_embd(fasterrcnn_image_test, i_path)
#---------- compute image embeddings ------------------------------


#---------- compute video embeddings ------------------------------
videoEmbs = torch.zeros(len(allids_video), 4096)
fasterrcnn_video_test = fasterrcnn_load_caculate('/myspace/res_model_fasterrcnn_image/faster_rcnn_1_7_10263.pth')
if CUDA:
    fasterrcnn_video_test = fasterrcnn_video_test.cuda()
for i in range(len(allids_video)):
    if i % 1000 == 0:
        print('done:' + str(i))
    video_path = '/myspace/testVideoTranPic/' + allids_video[i]
#     image_names = os.listdir(image_path)
    # Embed the fixed frame '200.jpg' — presumably pre-extracted per video; confirm.
    v_path = video_path + '/200.jpg'
    videoEmbs[i] = fasterrcnn_embd(fasterrcnn_video_test, v_path)
#---------- compute video embeddings ------------------------------


# videoEmbs = torch.from_numpy(np.load('videoEmbs.npy'))
# imageEmbs = torch.from_numpy(np.load('imageEmbs.npy'))


if CUDA:
    videoEmbs = videoEmbs.cuda()
    imageEmbs = imageEmbs.cuda()


# test
start_time = time.time()
test(videoEmbs, imageEmbs)
# np.save pickles the dict into 'candidate_image_index_list_p101.npy'.
np.save("candidate_image_index_list_p101", candidate_image_index_list)
# import json

# with open('candidate_image_index_list1_p101.txt', 'w') as file:
#     file.write(json.dumps(str(candidate_image_index_list)))
