from util import utils
import parser
from net import models
import sys
import random
import numpy as np
import math
from util.loss import TripletLoss
from util.cmc import Video_Cmc
from util.tools import *

import torch
import torch.nn as nn
import torch.optim as optim
from torchvision.transforms import Compose,ToTensor,Normalize,Resize
import torch.backends.cudnn as cudnn
cudnn.benchmark=True
import os
import time
import datetime
from apex import amp
#os.environ['CUDA_VISIBLE_DEVICES']='0'
torch.multiprocessing.set_sharing_strategy('file_system')


def validation(network, dataloader, args):
    """Extract features for the whole test set and report rank-1 CMC and mAP.

    The network is switched to eval mode for feature extraction and restored
    to train mode before returning.

    Args:
        network: feature extractor; called as ``network(seqs)`` on CUDA input.
        dataloader: test loader whose dataset exposes ``query_idx``.
        args: parsed CLI options (currently unused here, kept for interface).

    Returns:
        Tuple ``(rank1, mAP)``.
    """
    network.eval()
    feats, labels, cams = [], [], []
    with torch.no_grad():
        for batch in dataloader:
            seqs = batch[0].cuda()
            # Move features back to CPU so the whole gallery fits in host memory.
            feats.append(network(seqs).cpu())
            labels.append(batch[1])
            cams.append(batch[2])

    gallery_features = torch.cat(feats, dim=0).numpy()
    gallery_labels = torch.cat(labels, dim=0).numpy()
    gallery_cams = torch.cat(cams, dim=0).numpy()

    Cmc, mAP = Video_Cmc(gallery_features, gallery_labels, gallery_cams,
                         dataloader.dataset.query_idx, 10000)
    network.train()

    return Cmc[0], mAP

    
if __name__ == '__main__':
    # ---- Setup: args, seeding, logging -------------------------------------
    args = parser.parse_args()
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    # Fix: the original called `osp.join`, but `osp` is never imported in this
    # file (it would only be in scope if `util.tools` re-exported it through
    # the star import).  os.path.join is unambiguous and equivalent.
    sys.stdout = Logger(os.path.join('log', args.checkpoint_name + '_' + 'log_train.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    # set transformation (H flip is inside dataset)
    train_transform = Compose([Resize((args.h, args.w)), ToTensor(),
                               Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
    test_transform = Compose([Resize((args.h, args.w)), ToTensor(),
                              Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])

    # Dataset info -- placeholder paths, point these at your MARS-style lists.
    train_txt = 'yourpath/train_path.txt'
    train_info = 'yourpath/train_info.npy'
    test_txt = 'yourpath/test_path.txt'
    test_info = 'yourpath/test_info.npy'
    query_info = 'yourpath/query_IDX.npy'

    print('Start dataloader...')
    train_dataloader = utils.Get_Video_train_DataLoader(
        train_txt, train_info, train_transform, shuffle=True,
        num_workers=args.num_workers, S=args.S,
        track_per_class=args.track_per_class, class_per_batch=args.class_per_batch)
    num_class = train_dataloader.dataset.n_id
    test_dataloader = utils.Get_Video_test_DataLoader(
        test_txt, test_info, query_info, test_transform,
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.num_workers, S=args.S, distractor=True)
    print('End dataloader...\n')

    network = models.CNN(args.latent_dim, model_type=args.model_type,
                         num_class=num_class, stride=args.stride, args=args)

    if args.load_ckpt is not None:
        state = torch.load(args.load_ckpt)
        network.load_state_dict(state)

    # ---- Losses, optimizer, LR schedule ------------------------------------
    criterion_triplet = TripletLoss('soft', True)
    criterion_ID = nn.CrossEntropyLoss().cuda()
    optimizer = optim.Adam(network.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    scheduler = optim.lr_scheduler.StepLR(optimizer, args.lr_step_size, gamma=args.gamma)

    # Mixed precision must be initialized before wrapping in DataParallel.
    network = network.cuda()
    network, optimizer = amp.initialize(network, optimizer, opt_level="O1")
    network = nn.DataParallel(network)
    print("network:", network)

    # Per-eval-window running averages (reset after every validation report).
    id_loss_list = []
    trip_loss_list = []
    track_id_loss_list = []

    best_cmc = 0
    start_time = time.time()
    for e in range(args.n_epochs):
        lr = get_lr(optimizer)
        print('Epoch:', e, 'current lr:', lr)

        # ---- Periodic validation + checkpointing ---------------------------
        if (e + 1) % args.eval_step == 0:
            # Renamed local from `map` to `mAP`: `map` shadowed the builtin.
            cmc, mAP = validation(network, test_dataloader, args)
            print('epoch %d, rank-1 %f , mAP %f\n' % (e, cmc, mAP))
            if args.frame_id_loss:
                print('Frame ID loss : %r\n' % (id_loss_list))
            if args.track_id_loss:
                print('Track ID loss : %r\n' % (track_id_loss_list))
            print('Trip Loss : %r\n' % (trip_loss_list))

            id_loss_list = []
            trip_loss_list = []
            track_id_loss_list = []

            is_best = cmc > best_cmc
            if is_best:
                best_cmc = cmc
            # NOTE(review): state_dict() is taken from the DataParallel wrapper,
            # so keys carry a 'module.' prefix; reloading via --load_ckpt above
            # (before wrapping) would need that prefix stripped -- confirm.
            save_checkpoint({
                'state_dict': network.state_dict(),
                'rank1': cmc,
                'epoch': e,
                'mAP': mAP
            }, is_best, 'model_location' + '/' + args.checkpoint_name)

        # ---- One training epoch --------------------------------------------
        total_id_loss = 0       # frame-level ID loss: never accumulated in this loop
        total_trip_loss = 0
        total_track_id_loss = 0
        epoch_time = time.time()
        for data in train_dataloader:
            seqs = data[0]
            labels = data[1].cuda()
            # Fold (batch, S) into a single leading batch dimension.
            seqs = seqs.reshape((seqs.shape[0] * seqs.shape[1],) + seqs.shape[2:]).cuda()
            pool_feat, pool_output = network(seqs)

            total_loss = 0
            # NOTE(review): this assumes the network returns an iterable of
            # feature tensors (one triplet term each) -- confirm in models.CNN.
            for feat in pool_feat:
                trip_loss = criterion_triplet(feat, labels, dis_func='eu')
                total_trip_loss += trip_loss.mean().item()
                total_loss += args.triplet_w * trip_loss.mean()

            if args.track_id_loss:
                track_id_loss = criterion_ID(pool_output, labels)
                total_track_id_loss += track_id_loss.item()
                total_loss += args.softmax_w * track_id_loss

            optimizer.zero_grad()
            # apex amp scales the loss so fp16 gradients don't underflow.
            with amp.scale_loss(total_loss, optimizer) as scaled_loss:
                scaled_loss.backward()
            optimizer.step()

        scheduler.step()

        avg_id_loss = '%.4f' % (total_id_loss / len(train_dataloader))
        avg_trip_loss = '%.4f' % (total_trip_loss / len(train_dataloader))
        avg_track_id_loss = '%.4f' % (total_track_id_loss / len(train_dataloader))
        print('Trip : %s , ID : %s , Track_ID : %s' % (avg_trip_loss, avg_id_loss, avg_track_id_loss))
        id_loss_list.append(avg_id_loss)
        trip_loss_list.append(avg_trip_loss)
        track_id_loss_list.append(avg_track_id_loss)

        epoch_elapsed = round(time.time() - epoch_time)
        epoch_elapsed = str(datetime.timedelta(seconds=epoch_elapsed))
        print("epoch  time (h:m:s): {}".format(epoch_elapsed))
        # Cumulative wall-clock time since the start of training.
        consume_elapsed = round(time.time() - start_time)
        consume_elapsed = str(datetime.timedelta(seconds=consume_elapsed))
        print("up to now,consume time (h:m:s): {}".format(consume_elapsed))
