from util import utils
import parser
from net import models
import sys
import random
import numpy as np
import math
from util.loss import TripletLoss
from util.cmc import Video_Cmc
from util.tools import *
import profile
import thop
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision.transforms import Compose,ToTensor,Normalize,Resize
import torch.backends.cudnn as cudnn
cudnn.benchmark=True
import os
import time
import datetime
#os.environ['CUDA_VISIBLE_DEVICES']='0'
torch.multiprocessing.set_sharing_strategy('file_system')


def validation(network, dataloader, args):
    """Evaluate `network` on the test set and compute CMC / mAP.

    Two modes:
      * ``args.use_npz`` — load pre-extracted gallery features from .npz files
        and sweep a fusion weight between two feature sets, printing the best
        rank-1 / mAP found.  Returns None.
      * normal — run the network over `dataloader`, collect features for every
        track, then score with ``Video_Cmc``.

    Args:
        network: torch module (wrapped in DataParallel by the caller); moved to
            eval mode here and back to train mode before returning.
        dataloader: test loader; ``dataloader.dataset.query_idx`` marks the
            query tracks inside the gallery.
        args: parsed CLI namespace (uses ``use_npz``, ``sole``, ``feat_idx``).

    Returns:
        (cmc_list, mAP) where cmc_list = [rank-1, rank-5, rank-10, rank-20],
        or None in the ``use_npz`` branch.
    """
    network.eval()
    gallery_features = []
    gallery_labels = []
    gallery_cams = []

    if args.use_npz:
        # NOTE(review): `filename` is undefined in this scope — this branch
        # raises NameError as written.  The commented-out line
        # `filename = "npz/" + args.load_ckpt.split('/')[1]` hints at the
        # intent, but two paths are indexed here; the real paths must be
        # restored (or passed via args) before this mode can run.
        res = np.load(filename[0])
        res2 = np.load(filename[1])
        gallery_features = res['g_features']
        gallery_labels = res['g_labels']
        gallery_cams = res['g_cams']
        best_rank = 0
        best_map = 0
        # Sweep the fusion weight i in [0, 1] in steps of 0.01 and keep the
        # combination with the best rank-1 accuracy.
        for i in np.arange(0, 1.01, 0.01):
            fuse_gallery_features = res2['g_features'] + i * gallery_features
            Cmc, mAP = Video_Cmc(fuse_gallery_features, gallery_labels,
                                 gallery_cams, dataloader.dataset.query_idx, 10000)
            cmc_list = [Cmc[0], Cmc[4], Cmc[9], Cmc[19]]
            print("i", i)
            print(" rank-1 {:.2%} mAP: {:.2%}".format(cmc_list[0], mAP))
            if cmc_list[0] > best_rank:
                best_rank = cmc_list[0]
                best_map = mAP
        print(" best rank-1 {:.2%}  best mAP: {:.2%}".format(best_rank, best_map))
        return

    with torch.no_grad():
        for c, data in enumerate(dataloader):
            seqs = data[0].cuda()
            label = data[1]
            cams = data[2]

            # BUG FIX: the original had `else: feat = feat` immediately
            # followed by `elif args.temporal == 'Done':` — a SyntaxError that
            # made the whole file unimportable.  The two consecutive
            # `if args.sole` checks are merged into one branch and the dead
            # no-op `elif` remnant is dropped.
            if args.sole:
                feat, x = network(seqs)
                if args.feat_idx == 4:
                    # feat_idx 4 means "concatenate all intermediate features".
                    feat = torch.cat(x, 1)
                else:
                    feat = x[args.feat_idx]
            else:
                feat = network(seqs)

            gallery_features.append(feat.cpu())
            gallery_labels.append(label)
            gallery_cams.append(cams)

    gallery_features = torch.cat(gallery_features, dim=0).numpy()
    gallery_labels = torch.cat(gallery_labels, dim=0).numpy()
    gallery_cams = torch.cat(gallery_cams, dim=0).numpy()

    Cmc, mAP = Video_Cmc(gallery_features, gallery_labels, gallery_cams,
                         dataloader.dataset.query_idx, 10000)
    network.train()
    cmc_list = [Cmc[0], Cmc[4], Cmc[9], Cmc[19]]

    return cmc_list, mAP


if __name__ == '__main__':
    # Parse CLI arguments, then pin the RNG seed and visible GPUs.
    args = parser.parse_args()
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices

    # Standard ImageNet normalization; horizontal flip is handled inside the
    # dataset itself, so train and test transforms are identical here.
    train_transform = Compose([Resize((args.h, args.w)), ToTensor(),
                               Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
    test_transform = Compose([Resize((args.h, args.w)), ToTensor(),
                              Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])

    # Dataset file lists / metadata — placeholders to be edited per install.
    train_txt = 'yourpath/train_path.txt'
    train_info = 'yourpath/train_info.npy'
    test_txt = 'yourpath/test_path.txt'
    test_info = 'yourpath/test_info.npy'
    query_info = 'yourpath/query_IDX.npy'

    train_dataloader = utils.Get_Video_train_DataLoader(train_txt, train_info, train_transform, shuffle=True, num_workers=args.num_workers,\
                                                       S=args.S, track_per_class=args.track_per_class, class_per_batch=args.class_per_batch)
    num_class = train_dataloader.dataset.n_id
    test_dataloader = utils.Get_Video_test_DataLoader(test_txt, test_info, query_info, test_transform, batch_size=args.batch_size,\
                                                 shuffle=False, num_workers=args.num_workers, S=args.S, distractor=True)
    print('End dataloader...\n')

    network = nn.DataParallel(models.CNN(args.latent_dim, model_type=args.model_type, num_class=num_class, stride=args.stride, args=args).cuda())
    if args.use_npz:
        # npz mode scores pre-extracted features and prints results itself.
        validation(network, test_dataloader, args)
        exit()

    flp = False  # set True to measure FLOPs / params with thop
    if flp:
        x = torch.randn(1, 3, 256, 128)
        # BUG FIX: the original called `profile(...)` — but `profile` is the
        # (uncallable) stdlib module imported at the top; `thop.profile` is
        # the intended FLOP counter.
        flops, params = thop.profile(network, inputs=(x,))

    if args.load_ckpt is not None:
        print("load_ckpt file:", args.load_ckpt)  # fixed "loda_ckpt" typo
        state = torch.load(args.load_ckpt)['state_dict']
        network.load_state_dict(state)

    # Renamed `map` -> `mAP` so the builtin is not shadowed.
    cmc, mAP = validation(network, test_dataloader, args)
    print("mAP: {:.2%}".format(mAP))
    for i in cmc:
        print("rank: {:.2%}".format(i))