from __future__ import print_function

import argparse
import glob
import os
import time

import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as data
from tqdm import tqdm

from eval import evaluate
from datasets import *
from utils.utils import *


mixed_precision = False
#------------------混合精度---------------------------
try:
    from torch.cuda.amp import autocast as autocast
except:
    print('fail to speed up training via apex \n')
    mixed_precision = False  # not installed
print("是否使用混合精度训练:",mixed_precision)
#-----------------------------------------------------
# try:  
#     from apex import amp
# except:
#     print('fail to speed up training via apex \n')
#     mixed_precision = False  # not installed

DATASETS = {'UCAS_AOD':NWPUDataset,
            'SSDD':SSDDDataset,
            'HRSID':SSDDDataset,
            }


def train_model(args, hyps):
    args.save_path = "../"+ "_Weights/"+'dal/' + args.dataset + "/" +args.method
    if not os.path.exists(args.save_path):
        os.makedirs(args.save_path)
    #  parse configs
    epochs = int(hyps['epochs'])
    batch_size = int(hyps['batch_size'])
    results_file = args.save_path + os.sep+'result.txt'
    weight =  args.save_path + os.sep + 'last.pth' if args.resume or args.load else args.weight
    last = args.save_path + os.sep + 'last.pth'
    best = args.save_path + os.sep + 'best.pth'
    start_epoch = 0
    best_fitness = 0 #   max f1
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # creat folder


    for f in glob.glob(results_file):
        os.remove(f)

    # multi-scale
    if args.multi_scale:
        scales = args.training_size + 32 * np.array([x for x in range(-1, 5)])
        # set manually
        # scales = np.array([384, 480, 544, 608, 704, 800, 896, 960])
        print('Using multi-scale %g - %g' % (scales[0], scales[-1]))   
    else :
        scales = args.training_size 
############

    # dataloader
    assert args.dataset in DATASETS.keys(), 'Not supported dataset!'
    ds = DATASETS[args.dataset](dataset=args, augment=args.augment)
    
    #collater:如何取样本的，我们可以定义自己的函数来准确地实现想要的功能,用来对齐图片尺寸
    #在进行训练时，我们必须要保证每个batch中图片的shape完全一致。注意每张图片的短边可能都不一样，
    #有的可能是图片的宽，有的可能是图片的宽。因此，我们将所有图片都填充到1333x1333也就是resize后长边的尺寸，空白的地方全部用0值填充。
    collater = Collater(scales=scales, keep_ratio=True, multiple=32)
    
    #当计算机的内存充足的时候，可以设置pin_memory=True。当系统卡住，或者交换内存使用过多的时候，设置pin_memory=False。
    loader = data.DataLoader(
        dataset=ds,
        batch_size=batch_size,
        num_workers=0,
        collate_fn=collater,
        shuffle=True,
        pin_memory=True,
        drop_last=True
    )

    # Initialize model
    init_seeds()
    #如果cas(级联)存在
    if 'cas' in args.method:
        from models.model_cas import RetinaNet
    else:
        from models.model import RetinaNet
    model = RetinaNet(backbone=args.backbone, hyps=hyps)

    # Optimizer
    optimizer = optim.Adam(model.parameters(), lr=hyps['lr0'])
    # scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=0.1)
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[round(epochs * x) for x in [0.6, 0.8]], gamma=0.1)
    try:
        from torch_warmup_lr import WarmupLR
        scheduler = WarmupLR(scheduler, init_lr=hyps['warmup_lr'], num_warmup=hyps['warm_epoch'], warmup_strategy='cos')
    except:
        pass
    # scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=20, T_mult=1, eta_min = 1e-5)
    scheduler.last_epoch = start_epoch - 1
    ######## Plot lr schedule #####
#     y = []
#     for _ in range(epochs):
#         scheduler.step()
#         y.append(optimizer.param_groups[0]['lr'])
#     plt.plot(y, label='LR')
#     plt.xlabel('epoch')
#     plt.ylabel('LR')
#     plt.tight_layout()
#     plt.savefig('LR.png', dpi=300)    
#     import ipdb; ipdb.set_trace()
    ###########################################

  
    
    # load chkpt
    if weight.endswith('.pth'):
        chkpt = torch.load(weight)
#         print("cccccccccccccc",chkpt.keys())
#         print("23222222222222222222",chkpt['model'].keys())
        # load model
        if 'model' in chkpt.keys() :

            model.load_state_dict(chkpt['model'])
        else:
            model.load_state_dict(chkpt)
        # load optimizer
        if 'optimizer' in chkpt.keys() and chkpt['optimizer'] is not None and args.resume :
            optimizer.load_state_dict(chkpt['optimizer'])
            best_fitness = chkpt['best_fitness']
            for state in optimizer.state.values():
                for k, v in state.items():
                    if isinstance(v, torch.Tensor):
                        state[k] = v.cuda()
        # load results
        if 'training_results' in chkpt.keys() and  chkpt.get('training_results') is not None and args.resume:
            with open(results_file, 'w') as file:
                file.write(chkpt['training_results'])  # write results.txt
        if args.resume and 'epoch' in chkpt.keys():
            start_epoch = chkpt['epoch'] + 1   

        del chkpt
    print("当前device：",torch.cuda.current_device())
    if torch.cuda.is_available():
        model.cuda()
    if torch.cuda.device_count() > 2:
        model = torch.nn.DataParallel(model).cuda()
    print(model)
    
#     if mixed_precision:
#         model, optimizer = amp.initialize(model, optimizer, opt_level='O1', verbosity=0)
    if mixed_precision:
        scaler = torch.cuda.amp.GradScaler(enabled=True)
        
        
    model_info(model, report='summary')  # 'full' or 'summary'
    # 'P', 'R', 'mAP', 'F1'
    results = (0, 0, 0, 0)

    for epoch in range(start_epoch,epochs):

        if 'cas' in args.method:
            print(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'ref', 'cls', 'reg', 'total', 'targets', 'img_size'))
            mloss = torch.zeros(3).cuda()
        else:
            print(('\n' + '%10s' * 7) % ('Epoch', 'gpu_mem',  'cls', 'reg', 'total', 'targets', 'img_size'))
            mloss = torch.zeros(2).cuda()
            
        pbar = tqdm(enumerate(loader), total=len(loader))  # progress bar
        for i, (ni, batch) in enumerate(pbar):
            
            model.train()

            if args.freeze_bn:
                if torch.cuda.device_count() > 2:
                    model.module.freeze_bn()
                else:
                    model.freeze_bn()

            optimizer.zero_grad()
            ims, gt_boxes = batch['image'], batch['boxes']
            if torch.cuda.is_available():
                ims, gt_boxes = ims.cuda(), gt_boxes.cuda()
            if mixed_precision:
                with autocast():
                    losses = model(ims,args.method, gt_boxes,process =epoch/epochs )
                    loss_cls, loss_reg = losses['loss_cls'].mean(), losses['loss_reg'].mean()
                    
                    if losses.__contains__('loss_ref'):
                        loss_ref = losses['loss_ref'].mean()
                        loss = loss_cls + (loss_reg + loss_ref)*0.5 
                    else:
                        loss = loss_cls + loss_reg

            else:
                losses = model(ims,args.method, gt_boxes,process =epoch/epochs )
                loss_cls, loss_reg = losses['loss_cls'].mean(), losses['loss_reg'].mean()
                if losses.__contains__('loss_ref'):
                    loss_ref = losses['loss_ref'].mean()
                    loss = loss_cls + (loss_reg + loss_ref)*0.5 
                else:
                    loss = loss_cls + loss_reg               
            if not torch.isfinite(loss):
                import ipdb; ipdb.set_trace()
                print('WARNING: non-finite loss, ending training ')
                break
            if bool(loss == 0):
                continue

            # calculate gradient
            if mixed_precision:
#                 with amp.scale_loss(loss, optimizer) as scaled_loss:
#                     scaled_loss.backward()
                scaler.scale(loss).backward()
                # scaler.step() 首先把梯度的值unscale回来.
                # 如果梯度的值不是 infs 或者 NaNs, 那么调用optimizer.step()来更新权重,
                # 否则，忽略step调用，从而保证权重不更新（不被破坏）
                nn.utils.clip_grad_norm_(model.parameters(), 0.1)
                scaler.step(optimizer)

                # 准备着，看是否要增大scaler
                scaler.update()
            else:
                loss.backward()

                nn.utils.clip_grad_norm_(model.parameters(), 0.1)
                optimizer.step()

            # Print batch results
            if losses.__contains__('loss_ref'):
                loss_items = torch.stack([loss_ref, loss_cls, loss_reg], 0).detach()
            else: 
                loss_items = torch.stack([loss_cls, loss_reg], 0).detach()
            mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses
            mem = torch.cuda.memory_cached() / 1E9 if torch.cuda.is_available() else 0  # (GB)
            if 'cas' in args.method:
                s = ('%10s' * 2 + '%10.3g' * 6) % (
                      '%g/%g' % (epoch, epochs - 1), '%.3gG' % mem, *mloss, mloss.sum(), gt_boxes.shape[1], min(ims.shape[2:]))
            else:
                s = ('%10s' * 2 + '%10.3g' * 5) % (
                      '%g/%g' % (epoch, epochs - 1), '%.3gG' % mem, *mloss, mloss.sum(), gt_boxes.shape[1], min(ims.shape[2:]))                
            pbar.set_description(s)

        # Update scheduler
        scheduler.step()
        final_epoch = epoch + 1 == epochs
        
        # eval
        if hyps['test_interval']!= -1 and epoch % hyps['test_interval'] == 0  :
            if torch.cuda.device_count() > 2:
                results = evaluate(args = args,target_size=args.target_size,
                                   test_path=args.test_path,
                                   dataset=args.dataset,
                                   model=model.module, 
                                   hyps=hyps,
                                   conf = 0.01 if final_epoch else 0.1)    
            else:
                results = evaluate(args = args,target_size=args.target_size,
                                   test_path=args.test_path,
                                   dataset=args.dataset,
                                   model=model,
                                   hyps=hyps,
                                   conf = 0.01 if final_epoch else 0.1) #  p, r, map, f1

        
        # Write result log
        with open(results_file, 'a') as f:
            f.write(s + '%10.3g' * 4 % results + '\n')  # P, R, mAP, F1, test_losses=(GIoU, obj, cls)

        ##   Checkpoint
        if args.dataset in ['IC15', ['IC13']]:
            fitness = results[-1]   # Update best f1
        else :
            fitness = results[-2]   # Update best mAP
        if fitness > best_fitness:
            best_fitness = fitness

        with open(results_file, 'r') as f:
            # Create checkpoint
            chkpt = {'epoch': epoch,
                     'best_fitness': best_fitness,
                     'training_results': f.read(),
                     'model': model.module.state_dict() if type(
                        model) is nn.parallel.DistributedDataParallel else model.state_dict(),
                     'optimizer': None if final_epoch else optimizer.state_dict()}
        

        # Save last checkpoint
        torch.save(chkpt, last)
        # Save best checkpoint
        if best_fitness == fitness:
            torch.save(chkpt, best) 

#         if (epoch % hyps['save_interval'] == 0  and epoch > 40) or final_epoch:
#             if torch.cuda.device_count() > 2:
#                 torch.save(chkpt, args.save_path+'/deploy%g.pth'% epoch)
#             else:
#                 torch.save(chkpt, args.save_path+'/deploy%g.pth'% epoch)

    # end training
    dist.destroy_process_group() if torch.cuda.device_count() > 2 else None
    torch.cuda.empty_cache()



if __name__ == '__main__':
    
    parser = argparse.ArgumentParser(description='Train a detector')
    # config
    parser.add_argument('--hyp', type=str, default='hyp.py', help='hyper-parameter path')
    # network
    parser.add_argument('--backbone', type=str, default='res50')
    parser.add_argument('--freeze_bn', type=bool, default=False)

    parser.add_argument('--weight', type=str, default='')   # 
    parser.add_argument('--multi-scale', action='store_true', help='adjust (67% - 150%) img_size every 10 batches')
   
    parser.add_argument('--dataset', type=str, default='HRSID')#选择要训练的数据集(SSDD,HRSID)
    
    #method需要自己命名,保证权重文件夹名字不同。目前有retinanet和dal两种方法，注意命名里必须带retina跟dal，以选择不同的loss训练。
    parser.add_argument('--method', type=str, default='_retina_test_0.5',choices=['dal','retina'])#_retina_cas_test_0.5_0.6
    #以下为放置数据集的格式
    parser.add_argument('--train_path', type=str, default='../_DATASET/SSDD_dal/ImageSets/train.txt')
    parser.add_argument('--test_path', type=str, default='../_DATASET/SSDD_dal/ImageSets/test.txt')
    parser.add_argument('--data_path', type=str, default='../_DATASET/SSDD_dal')
    parser.add_argument('--gt_path', type=str, default='../_DATASET/SSDD_dal/Ground-Truth')
    
    #数据集的训练尺寸,这里的size指的是短边，长边根据比例进行放大
    parser.add_argument('--training_size', type=int, default=800)
    
    parser.add_argument('--resume', action='store_true', help='resume training from last.pth')
    parser.add_argument('--load', action='store_true', help='load training from last.pth')
    parser.add_argument('--augment', action='store_true', help='data augment')
    
    #数据集的测试尺寸
    parser.add_argument('--target_size', type=int, default=[800])   
    #

    args = parser.parse_args()
    hyps = hyp_parse(args.hyp)
    
    if args.dataset == "SSDD":
        args.train_path = '../_DATASET/SSDD_dal/ImageSets/train.txt'
        args.test_path = '../_DATASET/SSDD_dal/ImageSets/test.txt'
        args.data_path = '../_DATASET/SSDD_dal'
        args.gt_path = '../_DATASET/SSDD_dal/Ground-Truth'
    elif args.dataset == "HRSID":
        args.train_path = '../_DATASET/HRSID/ImageSets/train.txt'
        args.test_path = '../_DATASET/HRSID/ImageSets/test.txt'
        args.data_path = '../_DATASET/HRSID'
        args.gt_path = '../_DATASET/HRSID/Ground-Truth'
    print(args)
    print(hyps)

    train_model(args, hyps)