# -*- coding: utf-8 -*-
import os
import sys
import torch
import numpy as np
from loss.Loss import CtdetLoss
from torch.utils.data import DataLoader
from utils.optim import IterWarmUpCosineDecayMultiStepLRAdjust,PlainRAdam
import torch.nn as nn
from tqdm import tqdm
import matplotlib.pyplot as plt
import argparse
from eval import evaluate
from utils.utils import *
from datasets_loader.ssdd import SSDD
from datasets_loader.hrsid import HRSID
sys.path.append(r'./backbone')  # allow backbone modules to be imported by bare name

# Probe for NVIDIA apex mixed-precision support; fall back to plain fp32 training.
mixed_precision = True
try:
    from apex import amp
except ImportError:  # only the missing-package case, not arbitrary errors
    print('fail to speed up training via apex \n')
    mixed_precision = False  # not installed

# Registry of supported datasets, keyed by the --dataset CLI choice.
DATASETS = {'SSDD': SSDD,
            'HRSID': HRSID}


def _build_model(args, head):
    """Instantiate the backbone network selected by ``args.method``.

    Supported families: dla / dlanet (optionally dcn or drn variants),
    swin (Swin-Transformer small, loads ImageNet pretrain), resnet (CTRBOX).

    Raises:
        ValueError: if ``args.method`` matches no known backbone.
    """
    if "dlanet" in args.method or "dla" in args.method:
        if "dcn" in args.method:
            print("using ", 'dlanet_dcn')
            from dlanet_dcn import DlaNet
        elif 'drn' in args.method:
            print('using', 'dlanet_drn')
            from dlanet_dcn_drn import DlaNet
        else:
            print("using ", 'dlanet')
            from dlanet import DlaNet
        return DlaNet(34, heads=head)

    if "swin" in args.method:
        print("using ", 'swin')
        from swinT import SwinTransformer
        model = SwinTransformer(heads=head,
                                pretrain_img_size=224,
                                patch_size=4,
                                in_chans=3,
                                embed_dim=96,
                                depths=[2, 2, 18, 2],  # [2, 2, 18, 2] for small, [2, 2, 6, 2] for tiny
                                num_heads=[3, 6, 12, 24],
                                window_size=7,
                                mlp_ratio=4.,
                                qkv_bias=True,
                                qk_scale=None,
                                drop_rate=0.,
                                attn_drop_rate=0.,
                                drop_path_rate=0.2,
                                norm_layer=nn.LayerNorm,
                                ape=False,  # default is False
                                patch_norm=True,
                                out_indices=(0, 1, 2, 3),
                                frozen_stages=-1,
                                use_checkpoint=False)
        pretrained_net = torch.load('../pretrain/swin_small_patch4_window7_224.pth')
        # strict=False: the detection heads are not present in the classifier checkpoint.
        model.load_state_dict(pretrained_net['model'], strict=False)
        return model

    if "resnet" in args.method:
        print("using", 'resnet')
        from ctrbox_net import CTRBOX
        return CTRBOX(heads=head,
                      pretrained=True,
                      down_ratio=args.down_ratio,
                      final_kernel=1,
                      head_conv=256)

    raise ValueError('unknown method: %s' % args.method)


def train_models(args, hyps):
    """Train a rotated-box CenterNet-style detector and periodically evaluate it.

    Args:
        args: parsed CLI namespace (dataset, method, data_path, lr_step, ...).
            Mutated in place: ``save_path``, ``lr_step``, ``num_classes``,
            ``part`` and ``test_path`` are (re)assigned here.
        hyps: hyper-parameter dict; must provide 'epochs', 'batch_size',
            'num_classes', 'lr0' and 'test_interval'.

    Side effects:
        Writes cfg.txt, result.txt, last.pth and best.pth under args.save_path.
    """
    args.save_path = (".." + os.sep + "_Weights" + os.sep + 'rcenternet' + os.sep
                      + args.dataset + os.sep + args.method)
    os.makedirs(args.save_path, exist_ok=True)  # race-free create

    epochs = int(hyps['epochs'])
    batch_size = int(hyps['batch_size'])
    results_file = args.save_path + os.sep + 'result.txt'
    last = args.save_path + os.sep + 'last.pth'
    best = args.save_path + os.sep + 'best.pth'

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # '60,80' -> [60, 80]: epochs at which MultiStepLR decays the LR.
    args.lr_step = [int(s) for s in args.lr_step.split(',')]

    # Dump the full run configuration for reproducibility.
    with open(args.save_path + '/cfg.txt', 'w') as f:
        for arg in vars(args):
            print('%s: %s' % (arg, getattr(args, arg)), file=f)

    args.num_classes = int(hyps['num_classes'])

    # Network heads: class heatmap, width/height, rotation angle, sub-pixel offset.
    head = {'hm': args.num_classes, 'wh': 2, 'ang': 1, 'reg': 2}
    loss_weight = {'hm': 1, 'wh': 0.1, 'ang': 0.02, 'reg': 0.1}
    criterion = CtdetLoss(loss_weight)

    # ------------------------- dataloader ------------------------- #
    assert args.dataset in DATASETS, 'Not supported dataset!'
    train_dataset = DATASETS[args.dataset](args, split='train')
    train_loader = DataLoader(train_dataset, batch_size=batch_size,
                              shuffle=True, num_workers=args.num_workers)
    print('the dataset has %d images' % len(train_dataset))

    model = _build_model(args, head)

    if torch.cuda.is_available():  # current_device() raises on CPU-only hosts
        print('cuda', torch.cuda.current_device(), torch.cuda.device_count())

    print(model)

    model.to(device)  # device-agnostic: works on both CPU and GPU
    model.train()

    # ------------------------- optimizer ------------------------- #
    # optimizer = torch.optim.SGD(model.parameters(), lr=args.learning_rate, momentum=0.9, weight_decay=5e-4)
    optimizer = PlainRAdam(model.parameters(), args.learning_rate)

    if mixed_precision:
        # opt_level 'O1' (the letter O, not zero): conservative mixed precision.
        model, optimizer = amp.initialize(model, optimizer, opt_level='O1', verbosity=0)

    # ------------------------- LR schedule ------------------------- #
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, args.lr_step, gamma=0.1)

    init_mAP = 0.
    for epoch in range(epochs + 1):
        print('-' * 10)
        print('Epoch: {}/{} '.format(epoch, epochs))

        model.train()
        mloss = torch.zeros(5, device=device)  # running mean of the 5 loss terms
        print(('\n' + '%10s' * 7) % ('Epoch', 'gpu_mem', 'hm', 'wh', 'reg', 'ang', 'total'))
        pbar = tqdm(enumerate(train_loader), total=len(train_loader))  # progress bar
        for i, sample in pbar:
            for k in sample:
                sample[k] = sample[k].to(device=device, non_blocking=True)
            pred = model(sample['input'])

            hm_loss, wh_loss, off_loss, ang_loss = criterion(pred, sample)

            loss = (loss_weight['hm'] * hm_loss + loss_weight['wh'] * wh_loss
                    + loss_weight['reg'] * off_loss + loss_weight['ang'] * ang_loss)

            if not torch.isfinite(loss):
                # Abort this epoch on NaN/Inf instead of dropping into a debugger.
                print('WARNING: non-finite loss, ending training ')
                break
            if bool(loss == 0):
                continue

            optimizer.zero_grad()

            # Backward pass (through amp's loss scaling when enabled).
            if mixed_precision:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            optimizer.step()

            # Print batch results: update running mean of the per-term losses.
            loss_items = torch.stack([hm_loss, wh_loss, off_loss, ang_loss, loss], 0).detach()
            mloss = (mloss * i + loss_items) / (i + 1)
            try:
                mem = torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0  # (GB)
            except AttributeError:  # older torch without memory_reserved
                mem = 0
            s = ('%10s' * 2 + '%10.4g' * 5) % ('%g/%g' % (epoch, epochs - 1), '%.3gG' % mem, *mloss)
            pbar.set_description(s)

        # One scheduler step per epoch; passing the epoch explicitly is deprecated.
        scheduler.step()

        torch.save(model.state_dict(), last)
        print("epoch", epoch)
        if hyps['test_interval'] != -1 and (epoch % hyps['test_interval'] == 0 or epoch == epochs):
            model.eval()
            args.part = 'all'
            args.test_path = os.path.join(args.data_path, 'ImageSets', 'test.txt')
            results = evaluate(args,
                               model,
                               down_ratio=4,
                               hyps=None)  # p, r, map, f1
            if results[2] > init_mAP:  # keep the checkpoint with the best mAP
                init_mAP = results[2]
                print('get mAP ', results[2])
                torch.save(model.state_dict(), best)

            with open(results_file, 'a') as f:
                f.write('%10.3g' * 4 % results + '\n')  # P, R, mAP, F1


if __name__ == '__main__':
    parser = argparse.ArgumentParser()    
    #---------------------------------换数据集必改-----------------------------
    parser.add_argument('--dataset', default='SSDD',choices=['SSDD','HRSID'],help='dataset name') #测试时要指定数据集
    parser.add_argument('--data_path',default='../_DATASET/SSDD',help='dataset path')#'../_DATASET/HRSID/VOC'
    parser.add_argument('--input_h', type=int, default = 608, help='Resized image height')
    parser.add_argument('--input_w', type=int, default = 608, help='Resized image width')
    parser.add_argument('--max_obj_per_img',default=200,type=int)  
    #--------------------------------------------------------------------------
    
    #---------------------------------换网络必改---------------------------------
    parser.add_argument('--method', default='dla_drn')#vgl_a10_b0.2_w3
    #--------------------------------------------------------------------------
    # config
    parser.add_argument('--hyp', type=str, default='hyp.py', help='hyper-parameter path')

    parser.add_argument('--keep_res', default = False)
    parser.add_argument('--use_07_metric', default = False,type =bool)
    parser.add_argument('--split_ratio', type=float, default=1)
    parser.add_argument('--part',default = 'all')
    parser.add_argument('--phase', default = 'train',choices =['train','test'])  # train or test (1,0)
    parser.add_argument('--lr_step', type=str, default='60,80')
    parser.add_argument('--learning_rate', '--lr', default=1e-4, type=float)
    parser.add_argument('--down_ratio',type=int,default = 4)
    parser.add_argument('--num_workers',default = 2,type=int)
 
    args = parser.parse_args()
    hyps = hyp_parse(args.hyp)
    train_models(args, hyps)
