import os
import sys
import time
import argparse

sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'lib'))
from nms.nms import soft_nms
import numpy as np
import torch.nn as nn
import torch.utils.data
import torch.distributed as dist
from utils.optims_utils import IterWarmUpCosineDecayMultiStepLRAdjust,PlainRAdam
from utils.utils import _tranpose_and_gather_feature, load_model
from utils.image import transform_preds
# from utils.losses import _neg_loss, _reg_loss,_giou_loss
from utils.summary import  DisablePrint
from utils.post_process import ctdet_decode,ctdet_decode_csl_one
from utils.losses import Loss



# ---------------------------------------------------------------------------
# Command-line configuration.  Parsed at import time into the module-level
# ``args`` namespace that main() and the dataset/network builders read.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='simple_centernet45')
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument('--dist', action='store_true')
parser.add_argument('--root_dir', type=str, default='./')
parser.add_argument('--data_dir', type=str, default='../_DATASET')  # nwpu: ./data, dior: ../DATASET/Dior/
parser.add_argument('--log_name', type=str, default='test')
parser.add_argument('--method', type=str, default='cls_reg', choices=['wh', 'tblr', 'dense', 'tblr_cor', 'dfl', 'dfl_csl', 'dfl_csl_reg'])
parser.add_argument('--dataset', type=str, default='dior', choices=['coco', 'pascal', 'nwpu_0.2', 'nwpu_0.6', 'dior', 'ssdd', 'hrrsd', 'visDrone'])
# parser.add_argument('--arch', type=str, default='resnet(608gn_spp)_50', choices=['large_hourglass', 'resdcn', 'resnet_50'])
parser.add_argument('--arch', type=str, default='dlanet34_cls_sml0.01_std0.005_L0.3_16(EGK_0.7_only_train_800_omega1)_v3', choices=['large_hourglass', 'dlanet_dcn', 'dlanet'])
# dlanet_no_dfl_csl_L0.3_16(EGK_0.5_tblr_only_train)
parser.add_argument('--img_size', type=int, default=800)
parser.add_argument('--split_ratio', type=float, default=1)
# Must be on the order of 1e-4 or training does not converge; use 5e-4 for DCN nets.
parser.add_argument('--lr', type=float, default=1e-4)
parser.add_argument('--lr_step', type=str, default='30,35')  # '160,200,220' for nwpu, '30' for dior
parser.add_argument('--batch_size', type=int, default=5)  # 8 when img_size is 608
parser.add_argument('--num_epochs', type=int, default=40)  # 240 for nwpu, 40 for dior

parser.add_argument('--test_topk', type=int, default=100)
parser.add_argument('--log_interval', type=int, default=100)
parser.add_argument('--val_interval', type=int, default=5)
parser.add_argument('--num_workers', type=int, default=2)

# --------------------------- ablation-study settings ---------------------------
# BUG FIX: --alpha was declared type=int, so any fractional value passed on the
# command line was rejected by argparse (int("0.5") raises ValueError); the
# intended choices are 0.1/0.3/0.5/0.7/0.9 (the EGK encoding ratio).  The
# float default 0.7 only worked because argparse does not coerce defaults
# through ``type``.
parser.add_argument('--alpha', type=float, default=0.7)
parser.add_argument('--hm_kernel', type=str, default="EGK")  # heatmap encoding: "EGK" or "CGK"
parser.add_argument('--points', type=int, default=16)  # number of points in SSB: 4, 8, 16 or 24
parser.add_argument('--reg_max', type=int, default=100)  # distance range used by the BRB head
parser.add_argument('--omega', type=int, default=1)  # bin width used to discretize BRB distances
# -------------------------------------------------------------------------------
args = parser.parse_args()

# os.chdir(cfg.root_dir)  # would change the current working directory to root_dir

# '30,35' -> [30, 35], the milestone list consumed by MultiStepLR in main().
args.lr_step = [int(s) for s in args.lr_step.split(',')]


def main():
    """Train the detector described by the module-level ``args`` namespace.

    Builds the dataset/model/optimizer, trains for ``args.num_epochs`` epochs,
    evaluates every ``args.val_interval`` epochs, and checkpoints the
    best-mAP / best-AP50 / last weights under ``save_path``.  Side effects:
    writes cfg.txt, loss.txt, map.txt and *.pth files.
    """
    print("-------------------------------------------------------")
    print("torch版本:",torch.__version__)
    print("CUDA是否可以用:",torch.cuda.is_available())
    print(torch.version.cuda)
    print("-------------------------------------------------------")
    # Fixed seed for repeatability; note cudnn.benchmark=True below still
    # allows nondeterministic conv-algorithm selection.
    seed = 317
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)  # seed the current GPU
    torch.backends.cudnn.deterministic = False  # cudnn
    torch.backends.cudnn.benchmark = True  # disable this if OOM at beginning of training
    np.random.seed(seed)  # numpy
    
    # Checkpoints and logs are grouped by dataset / architecture / method.
    save_path = "../"+ "_Weights/"+'hcenternet/' + args.dataset + "/" + args.arch+"+"+args.method
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    # Dump the full run configuration so the experiment can be reproduced later.
    with open(save_path + '/cfg.txt', 'w') as f:
        for arg in vars(args):  # vars() returns the namespace's attributes as a dict
            print('%s: %s' % (arg, getattr(args, arg)), file=f)  # redirect the print into f
    print(args)



    num_gpus = torch.cuda.device_count()
    if args.dist:
        # One process per GPU: pin this process to its rank's device and join
        # the NCCL process group (rendezvous through environment variables).
        args.device = torch.device('cuda:%d' % args.local_rank)
        torch.cuda.set_device(args.local_rank)
        dist.init_process_group(backend='nccl', init_method='env://',
                                world_size=num_gpus, rank=args.local_rank)
    else:
        args.device = torch.device('cuda')

    print('Setting up data...')
    criterion = Loss(args)
    
    # Select dataset wrappers by substring match on --dataset.
    # NOTE(review): an unmatched value (e.g. 'coco', 'pascal', 'ssdd') leaves
    # COCO/COCO_eval/PascalVOC_eval undefined and raises NameError just below.
    if 'hrrsd' in args.dataset:
        from datasets.hrrsd import COCO, COCO_eval,PascalVOC_eval
    elif 'nwpu' in args.dataset:
        from datasets.nwpu import COCO, COCO_eval,PascalVOC_eval
    elif 'dior' in args.dataset:
        from datasets.dior import COCO, COCO_eval,PascalVOC_eval
    elif 'visDrone' in args.dataset:
        from datasets.drone import COCO, COCO_eval,PascalVOC_eval        
    Dataset = COCO
    train_dataset = Dataset(args, 'train')
#     train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset,
#                                                                         num_replicas=num_gpus,
#                                                                         rank=args.local_rank)
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                                   batch_size=args.batch_size // num_gpus
                                                   if args.dist else args.batch_size,
                                                   shuffle=not args.dist,
                                                   num_workers=args.num_workers,
                                                   pin_memory=True,
                                                   drop_last=True)
    # Two evaluation views over the same validation split: COCO-style metrics
    # and PascalVOC-style per-class AP.
    Dataset_eval = COCO_eval
    val_dataset= Dataset_eval(args, 'val', img_size=args.img_size, test_scales=[1.], test_flip=False)
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=1,
                                                 shuffle=False, num_workers=1, pin_memory=True,
                                                 collate_fn=val_dataset.collate_fn)
    Dataset_eval_VOC = PascalVOC_eval
    val_dataset_VOC = Dataset_eval_VOC(args, 'val', img_size=args.img_size, test_scales=[1.], test_flip=False)
    val_loader_VOC  = torch.utils.data.DataLoader(val_dataset_VOC, batch_size=1,
                                                 shuffle=False, num_workers=1, pin_memory=True,
                                                 collate_fn=val_dataset_VOC.collate_fn)

    # Number of discrete distance bins per tblr branch (reg_max / omega + 1).
    cls_num = int(args.reg_max/args.omega)+1
    print("tblr每个分支分的类别数",cls_num)
    heads = {'hm':train_dataset.num_classes, 'tblr': 4*cls_num, 'ssb':args.points}
    print('Creating model...')
    if 'dlanet' in args.arch:
        if 'dcn' in args.arch:
            from nets.dlanet_dcn import DlaNet
            model = DlaNet(args,num_classes=train_dataset.num_classes,heads = heads,head_conv=256)  # trying head_conv=256
        else:
            from nets.dlanet import DlaNet
            model = DlaNet(args,num_classes=train_dataset.num_classes,head_conv=256)  # trying head_conv=256
    else:
        raise NotImplementedError
        
#     print(model)
    if args.dist:

        model = model.to(args.device)
        model = nn.parallel.DistributedDataParallel(model,
                                                    device_ids=[args.local_rank, ],
                                                    output_device=args.local_rank)
    else:
#         model = nn.DataParallel(model).to(args.device)
        model = model.to(args.device)

    # Optionally warm-start from a pretrained checkpoint (disabled).
#     pretrained_net = torch.load('../_Weights/hcenternet/dior/dlanet34_dcn_cls_sml0.05_std0.005_L0.3_16(EGK_0.7_only_train_800_omega1)_v3+cls_reg/best_ap50.pth')
#     model.load_state_dict(pretrained_net)
#     return

    optimizer = PlainRAdam(model.parameters(), args.lr)
#     optimizer = torch.optim.Adam(model.parameters(), args.lr)
#     lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, 20, eta_min=0, last_epoch=-1)
    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, args.lr_step, gamma=0.1)

    def train(epoch):
        """Run one training epoch, logging losses every --log_interval batches."""
        print('\n Epoch: %d' % epoch)
        model.train()
        tic = time.perf_counter()  # start of the first timing interval
        for batch_idx, batch in enumerate(train_loader):
            # Move every tensor in the batch onto the training device.
            for k in batch:
                if k != 'meta':
                    batch[k] = batch[k].to(device=args.device, non_blocking=True)

            outputs = model(batch['image'])  # forward pass
            
            # Loss-weight notes from the original author: for DIOR the heatmap
            # loss weight is 1 and the others 0.1; for NWPU hmap / w_h / reg
            # weights are all 1.
            
            if args.method == "cls_reg":
                hmap_loss,tblr_loss,mean_loss,var_loss,ssb_loss= criterion(outputs, batch)
                loss = hmap_loss +0.3*(tblr_loss+0.01*mean_loss+0.005*var_loss)+0.1*ssb_loss  # 0.1 is the default ssb weight
        
                
            # NOTE(review): ``loss`` is only assigned in the "cls_reg" branch;
            # any other --method value raises NameError on loss.backward().
            optimizer.zero_grad()
            loss.backward()
            
            # gradient clipping (disabled)
#             torch.nn.utils.clip_grad_norm(parameters=model.parameters(), max_norm=30, norm_type=2)
#             torch.nn.utils.clip_grad_norm_(model.parameters(), 35)
            optimizer.step()

            if batch_idx % args.log_interval == 0:
                duration = time.perf_counter() - tic  # elapsed since the previous perf_counter() call
                tic = time.perf_counter()
                if args.method == "cls_reg":
                     print('[%d/%d-%d/%d] ' % (epoch, args.num_epochs, batch_idx, len(train_loader)) +
                          ' hmap_loss= %.5f tblr_loss= %.5f mean_loss= %.5f var_loss= %.5f cor_loss= %.5f ' %
                          (hmap_loss.item(), tblr_loss.item(),mean_loss.item(),var_loss.item(),ssb_loss.item()) +
                          ' (%d samples/sec)' % (args.batch_size * args.log_interval / duration))
                with open(save_path + '/loss.txt', 'a+') as f:
                    
                     f.write('[%d/%d-%d/%d] ' % (epoch, args.num_epochs, batch_idx, len(train_loader)) +
                          ' hmap_loss= %.5f tblr_loss= %.5f mean_loss= %.5f var_loss= %.5f cor_loss= %.5f ' %
                          (hmap_loss.item(), tblr_loss.item(),mean_loss.item(),var_loss.item(),ssb_loss.item())+"\n")
                # NOTE(review): ``step`` is computed but never used — likely a
                # leftover from a removed summary-writer call.
                step = len(train_loader) * epoch + batch_idx

        return

    def val_map(args,epoch):
        """Evaluate on the validation split; return (COCO mAP, AP50).

        Also appends COCO-style and VOC-style metrics to map.txt.
        """
        print('\n Val@Epoch: %d' % epoch)
        model.eval()
        torch.cuda.empty_cache()
        max_per_image = 100  # cap on detections kept per image

        results = {}
        with torch.no_grad():
            for inputs in val_loader:
                img_id, inputs = inputs[0]

                detections = []
                for scale in inputs:
                    inputs[scale]['image'] = inputs[scale]['image'].to(args.device)
                    output = model(inputs[scale]['image'])[-1]
                    # NOTE(review): ``dets`` is only assigned in the "cls_reg"
                    # branch; other --method values raise NameError below.
                    if args.method == "cls_reg":
                        output = output[:-1]
                        dets = ctdet_decode_csl_one(*output,reg_max=args.reg_max, K=args.test_topk,omega = args.omega)
 
                        #--------------------------------------------------------------------------------------------------------------------                       
                    dets = dets.detach().cpu().numpy().reshape(1, -1, dets.shape[2])[0]

                    top_preds = {}
                    # Map both box corners from feature-map coordinates back to
                    # original-image coordinates via the stored (center, scale).
                    dets[:, :2] = transform_preds(dets[:, 0:2],
                                                  inputs[scale]['center'],
                                                  inputs[scale]['scale'],
                                                  (inputs[scale]['fmap_w'], inputs[scale]['fmap_h']))
                    dets[:, 2:4] = transform_preds(dets[:, 2:4],
                                                   inputs[scale]['center'],
                                                   inputs[scale]['scale'],
                                                   (inputs[scale]['fmap_w'], inputs[scale]['fmap_h']))
                    clses = dets[:, -1]
                    # Group detections by class; keys are 1-based class ids.
                    for j in range(val_dataset.num_classes):
                        inds = (clses == j)
                        top_preds[j + 1] = dets[inds, :5].astype(np.float32)
                        top_preds[j + 1][:, :4] /= scale

                    detections.append(top_preds)

#                 bbox_and_scores = {j: np.concatenate([d[j] for d in detections], axis=0)
#                                    for j in range(1, val_dataset.num_classes + 1)}
#############################################################################################
# soft-NMS applied per class (modifies the array in place)
                bbox_and_scores = {}
                for j in range(1, val_dataset.num_classes+1):  # 2 when the dataset is 'ssdd_coco'
                    bbox_and_scores[j] = np.concatenate([d[j] for d in detections], axis=0)
                    soft_nms(bbox_and_scores[j], Nt=0.5, method=2)
#                     if len(cfg.test_scales) > 1:
#                         soft_nms(bbox_and_scores[j], Nt=0.5, method=2)
#############################################################################################                
                # Keep at most max_per_image detections by global score threshold.
                scores = np.hstack([bbox_and_scores[j][:, 4] for j in range(1, val_dataset.num_classes + 1)])
                if len(scores) > max_per_image:
                    kth = len(scores) - max_per_image
                    thresh = np.partition(scores, kth)[kth]
                    for j in range(1, val_dataset.num_classes + 1):
                        keep_inds = (bbox_and_scores[j][:, 4] >= thresh)
                        bbox_and_scores[j] = bbox_and_scores[j][keep_inds]

                results[img_id] = bbox_and_scores
        
        eval_results = val_dataset.run_eval(results)
        eval_results_VOC = val_dataset_VOC.run_eval(results)

        with open(save_path + '/map.txt', 'a+') as f:
            map = eval_results[0]  # NOTE(review): shadows the builtin ``map``
            ap50 = eval_results[1]
            eval_results_ = "\n".join(str(i) for i in eval_results)
            f.write('\n')
            f.write("epoch : " + str(epoch) + "\n" + eval_results_ + "\n")
            f.write('\n')
            for k,v in eval_results_VOC[1].items():
            
                f.write(k+" : "+'%.2f%%' % (v * 100)+'\n')
            f.write('mAP: '+ '%.2f%%' % (eval_results_VOC[0] * 100)+"\n")

        return map,ap50

    print('Starting training...')


    # Track the best COCO mAP / AP50 seen so far; checkpoint on improvement.
    # NOTE(review): ``map`` shadows the builtin within main's scope.
    map = 0
    ap50 =0
    for epoch in range(1, args.num_epochs + 1):
#         train_sampler.set_epoch(epoch)
        train(epoch)
        if args.val_interval > 0 and epoch % args.val_interval == 0:
            map_,ap50_ = val_map(args,epoch)
            if map_>map:
#                 torch.save(model.module.state_dict(),save_path+'/best_map.pth')
                torch.save(model.state_dict(),save_path+'/best_map.pth')
                map = map_
            if ap50_>ap50:
#                 torch.save(model.module.state_dict(),save_path+'/best_ap50.pth')
                torch.save(model.state_dict(),save_path+'/best_ap50.pth')
                ap50 =ap50_

        # NOTE(review): passing ``epoch`` to step() is deprecated in recent
        # PyTorch releases; a bare step() per epoch is the modern form —
        # TODO confirm the targeted PyTorch version.
        lr_scheduler.step(epoch)  # move to here after pytorch1.1.0

#     torch.save(model.module.state_dict(),save_path+'/last.pth')
    torch.save(model.state_dict(),save_path+'/last.pth')




if __name__ == '__main__':
    # DisablePrint is a project utility (utils.summary); presumably it
    # silences stdout on non-master ranks during distributed training —
    # TODO confirm it no-ops when local_rank == 0.
    with DisablePrint(local_rank=args.local_rank):
        main()
