# -*- coding: utf-8 -*-

import argparse
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import time
import os
import scipy.io
import yaml

import Models

from fp16utils import network_to_half
from torchvision.transforms.functional import InterpolationMode

# #fp16
# try:
#     from apex.fp16_utils import *
# except ImportError: # will be 3.x series
#     print('This is not an error. If you want to use low precision, i.e., fp16, please install the apex with cuda support (https://github.com/NVIDIA/apex) and update pytorch to 1.0')
# ######################################################################
# Options
# --------

parser = argparse.ArgumentParser(description='Training')
parser.add_argument('--gpu_ids', default='0', type=str, help='gpu_ids: e.g. 0  0,1,2  0,2')
parser.add_argument('--which_epoch', default='59', type=str, help='0,1,2,3...or last')

parser.add_argument('--test_dir', default='E:/BaiduNetdiskDownload/Market1501/pytorch', type=str, help='./test_data')
parser.add_argument('--name', default='ft_ResNet50', type=str, help='save model path')
parser.add_argument('--batchsize', default=5, type=int, help='batchsize')
# BUGFIX: opt.multi is read in the __main__ block below but the flag was
# never declared, which crashed with AttributeError. Declare it with a
# safe default (off) so existing invocations behave exactly as before.
parser.add_argument('--multi', action='store_true', help='use multi-query gallery features')


opt = parser.parse_args()


name = opt.name
test_dir = opt.test_dir

# Parse the comma-separated GPU list, keeping only non-negative ids.
# (Comprehension also avoids shadowing the builtin `id`.)
gpu_ids = [int(str_id) for str_id in opt.gpu_ids.split(',') if int(str_id) >= 0]

# Pin the first requested GPU and let cuDNN autotune conv algorithms
# (fine here because all test batches share the same input size).
if len(gpu_ids) > 0:
    torch.cuda.set_device(gpu_ids[0])
    cudnn.benchmark = True

######################################################################
# Load Data
# ---------
#
# We will use torchvision and torch.utils.data packages for loading the
# data.
#
# Test-time preprocessing: tensor conversion plus ImageNet-statistics
# normalization. (Resize / TenCrop variants were removed from the pipeline.)
data_transforms = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])


data_dir = test_dir

# One ImageFolder + DataLoader per split. shuffle=False keeps the feature
# rows aligned with image_datasets[...].imgs for the later .mat export.
image_datasets = {}
dataloaders = {}
for split in ['gallery', 'query']:
    image_datasets[split] = datasets.ImageFolder(os.path.join(data_dir, split), data_transforms)
    dataloaders[split] = torch.utils.data.DataLoader(
        image_datasets[split],
        batch_size=opt.batchsize,
        shuffle=False,
        num_workers=10,
    )

class_names = image_datasets['query'].classes
# Keep the model on the CPU: moving it to the GPU exhausts GPU memory here.
use_gpu = False

######################################################################
# Load model
#---------------------------
def load_network(network):
    """Load the pretrained re-ID checkpoint into *network* in place.

    Args:
        network: torch.nn.Module that should receive the weights.

    Returns:
        The same module, with the checkpoint weights loaded.
    """
    # save_path = os.path.join('./model',name,'net_%s.pth'%opt.which_epoch)
    # TODO: switch to the epoch-based save_path above once training is done.
    # map_location keeps the load working on CPU-only machines (the raw
    # checkpoint may hold CUDA tensors); strict=False (was a bare positional
    # False) tolerates keys that differ between train and test heads.
    state = torch.load(
        './model/ft_ResNet50/person_train_net_39.pth',
        map_location=None if torch.cuda.is_available() else torch.device('cpu'),
    )
    network.load_state_dict(state, strict=False)
    return network


######################################################################
# Extract feature
# ----------------------
#
# Extract feature from  a trained model.
# Flip images horizontally (left-right mirror).
def fliplr(img):
    """Flip a batch of images horizontally (reverse the width axis).

    Args:
        img: tensor laid out as N x C x H x W.

    Returns:
        A new tensor with the last (width) dimension reversed.
    """
    # torch.flip performs the reversal in one native call, replacing the
    # manual reversed-arange + index_select of the original.
    return torch.flip(img, dims=[3])

def extract_feature(model, dataloaders):
    """Extract flip-averaged, L2-normalized features for every image.

    Each batch is pushed through *model* twice — as-is and horizontally
    flipped — the two feature vectors are summed and the row is then
    L2-normalized, which makes the descriptor mirror-invariant.

    Args:
        model: network mapping an (N, C, H, W) batch to (N, D) features.
        dataloaders: iterable yielding (img, label) batches.

    Returns:
        CPU FloatTensor of shape (num_images, D). The feature dimension D
        is inferred from the model output (the original hard-coded 751).
    """
    # Unconditional .cuda() crashed on CPU-only machines (use_gpu is False
    # above); only move inputs when CUDA actually exists.
    use_cuda = torch.cuda.is_available()
    chunks = []
    count = 0
    for img, _label in dataloaders:
        n, c, h, w = img.size()
        print(img.size())
        count += n
        print(count)

        # Sum the features of the image and of its horizontal flip.
        ff = None
        for i in range(2):
            if i == 1:
                img = torch.flip(img, dims=[3])  # flip along the width axis
            input_img = img.cuda() if use_cuda else img  # Variable() is deprecated
            outputs = model(input_img)
            f = outputs.data.cpu().float()
            ff = f if ff is None else ff + f

        # L2-normalize each row; the clamp guards against division by zero
        # for a (degenerate) all-zero feature vector.
        fnorm = torch.norm(ff, p=2, dim=1, keepdim=True).clamp_min(1e-12)
        chunks.append(ff / fnorm)

    # Single concatenation at the end instead of repeated O(n^2) torch.cat.
    return torch.cat(chunks, 0) if chunks else torch.FloatTensor()

def get_id(img_path):
    """Parse camera ids and person labels from Market-1501 style filenames.

    Args:
        img_path: list of (path, class_index) pairs as produced by
            torchvision's ImageFolder ``.imgs``.

    Returns:
        Tuple (camera_id, labels) of parallel integer lists; junk images
        (filenames starting with '-1') get label -1.
    """
    camera_id = []
    labels = []
    for path, _ in img_path:
        fname = os.path.basename(path)
        # First four characters hold the zero-padded person id.
        pid_str = fname[:4]
        labels.append(-1 if pid_str.startswith('-1') else int(pid_str))
        # The digit right after the first 'c' is the camera index.
        cam_str = fname.split('c')[1]
        camera_id.append(int(cam_str[0]))
    return camera_id, labels
if __name__ == '__main__':

    # ImageFolder keeps (path, class) pairs in .imgs in the same order the
    # dataloaders iterate, so labels/cams line up with the feature rows.
    gallery_path = image_datasets['gallery'].imgs
    query_path = image_datasets['query'].imgs
    gallery_cam, gallery_label = get_id(gallery_path)
    query_cam, query_label = get_id(query_path)

    # BUGFIX: --multi may not be declared on the parser, so a plain
    # opt.multi raised AttributeError; getattr keeps this branch safe.
    if getattr(opt, 'multi', False):
        mquery_path = image_datasets['multi-query'].imgs
        mquery_cam, mquery_label = get_id(mquery_path)

    ######################################################################
    # Load Collected data Trained model
    # gallery_num: Market:751; DukeMTMC-reID:702; cuhk03:767
    print('-------test-----------')

    # BUGFIX: the unconditional .cuda() crashed on CPU-only machines;
    # move the backbone to the GPU only when one is actually available.
    model = Models.resnet50()
    if torch.cuda.is_available():
        model = model.cuda()
    model = load_network(model)

    # Switch to inference mode (freezes batch-norm stats, disables dropout).
    model = model.eval()
    if use_gpu:
        model = model.cuda()

    # Extract features without building autograd graphs.
    with torch.no_grad():
        gallery_feature = extract_feature(model, dataloaders['gallery'])
        query_feature = extract_feature(model, dataloaders['query'])
        # if opt.multi:
        #     mquery_feature = extract_feature(model,dataloaders['multi-query'])

    # Save features/labels/cams in MATLAB format for the evaluation script.
    result = {
        'gallery_f': gallery_feature.numpy(),
        'gallery_label': gallery_label,
        'gallery_cam': gallery_cam,
        'query_f': query_feature.numpy(),
        'query_label': query_label,
        'query_cam': query_cam,
    }
    scipy.io.savemat('pytorch_result.mat', result)

