import os
from tqdm import tqdm

import argparse

from functools import partial
from collections import Counter

import torch
from torch.utils.data import DataLoader

from utils.dataset import VOCDataset, worker_init_fn, box_label_dataset_collate
from utils.model import load_model_weights
from utils.scheduler import set_lr_policy
from utils.runtime import HistoryRecord, show_config
from utils.evaluation import EvalCallback

from settings import set_dataset, set_detector, set_optimizer, set_exp_dir, set_computing_env

def _str2bool(value):
    """Parse a command-line boolean flag value.

    argparse's ``type=bool`` is broken: ``bool("False")`` is ``True`` because
    any non-empty string is truthy.  This converter accepts the usual textual
    spellings (case-insensitive) and raises a proper argparse error otherwise.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ('true', '1', 'yes', 'y'):
        return True
    if lowered in ('false', '0', 'no', 'n'):
        return False
    raise argparse.ArgumentTypeError('Expected a boolean value, got %r' % (value,))

def set_train_configuration(argv=None):
    """Build and parse the training configuration.

    Parameters
    ----------
    argv : list[str] or None
        Argument list to parse.  ``None`` (the default) parses ``sys.argv``,
        preserving the original call signature; passing an explicit list makes
        the function usable from tests or notebooks.

    Returns
    -------
    argparse.Namespace with all training options.
    """
    parser = argparse.ArgumentParser()
    # Dataset configuration
    parser.add_argument('--dataset', default='NWPUv2', type=str, help='Dataset:NWPUv1, NWPUv2, DIOR')
    parser.add_argument('--dataset_path', type=str, default='dataset/NWPUv2')
    parser.add_argument('--num_workers', type=int, default=8)

    # Model configuration
    parser.add_argument('--detector', type=str, default='FasterRCNN',help='Detector:FasterRCNN, YOLOv3')
    # TODO: support loading an already-trained detector weight from the network
    parser.add_argument('--model_path', type=str, default=None, help='Detector weight path')
    parser.add_argument('--mode', type=str, default='train', help='mode: train, inference')

    # TODO: support multiple pretrain-loading schemes, reserved for SSL visual representation
    # NOTE: type=bool would treat "--pretrained False" as True; _str2bool parses it correctly.
    parser.add_argument('--pretrained', type=_str2bool, default=True, help='use pretrained backbone')
    parser.add_argument('--pretrained_style', type=str, default=None, help='Pretrain style: random_init, imagenet, SSL.')

    parser.add_argument('--backbone', type=str, default='resnet50', help='Backbone: resnet50, resnet101')
    parser.add_argument('--neck', type=str, default='FPN', help='Neck: FPN' )
    parser.add_argument('--rpn_head', type=str, default='RPN', help='Densehead: RPN')
    # nargs='+' with an element type replaces the broken type=list
    # (type=list would split a CLI string into individual characters).
    parser.add_argument('--anchor_scales', type=int, nargs='+', default=[4, 8, 16], help='anchor scales')
    parser.add_argument('--anchor_ratios', type=float, nargs='+', default=[0.5, 1, 2], help='anchor aspect ratios')
    parser.add_argument('--feat_stride', type=int, default = 16)
    parser.add_argument('--roi_head', type=str, default='Resnet50RoIHead')

    # Optimizer configuration
    # TODO: support multiple learning-rate adjustment schemes
    parser.add_argument('--optimizer', type=str, default='sgd', help='optimizer: sgd, adam')
    parser.add_argument('--lr', type=float, default=0.01, help='Init learning rate')
    parser.add_argument('--momentum', type=float, default=0.9)
    parser.add_argument('--weight_decay', type=float, default=1e-4)
    parser.add_argument('--lr_policy', type=str, default='step', help='warmup, cos, step')
    parser.add_argument('--warmup_epochs', type=int, default=10)
    # float, not int: int('0.001') would raise ValueError when passed on the CLI
    parser.add_argument('--warmup_init_lr', type=float, default=0.001)
    parser.add_argument('--steps', type=int, nargs='+', default=[60,80,90])

    # Used by the bbox decoder: boxes whose IoU exceeds nms_iou are treated as
    # predictions of the same GT; only the highest-confidence one is kept.
    parser.add_argument('--nms_iou', type=float, default=0.3)
    # Used by the bbox decoder: boxes above this confidence are considered valid.
    # The threshold is low because the surviving boxes feed metric evaluation.
    parser.add_argument('--confidence', type=float, default=0.05)

    # Runtime configuration
    parser.add_argument('--exps_dir', type=str, default='./exps/')
    parser.add_argument('--exp_name', type=str, default=None)
    parser.add_argument('--record_type', type=str, nargs='+', default=['tb', 'txt', 'plot'], help='record type: tb, txt, plot')
    parser.add_argument('--resume', type=_str2bool, default=False)
    parser.add_argument('--ckpt_path', type=str, default= None)
    parser.add_argument('--fp16', type=_str2bool, default=True)
    parser.add_argument('--start_epoch', type=int, default=0)
    parser.add_argument('--freeze_epoch', type=int, default=10)
    parser.add_argument('--total_epoch', type=int, default=100)
    parser.add_argument('--bs', type=int, default=16, help='Image batch size')
    # TODO: use different batch sizes for freeze and unfreeze phases
    # parser.add_argument('--freeze_bs', type=int, default=16, help='Freeze batch size')
    parser.add_argument('--save_period', type=int, default=1)
    parser.add_argument('--eval_flag', type=_str2bool, default=True)
    parser.add_argument('--eval_period', type=int, default=1)

    # Computing env configuration
    parser.add_argument('--seed', type=int, default=2023)
    parser.add_argument('--gpu_id', type=int, nargs='+', default=[0], help='None: cpu')

    return parser.parse_args(argv)

if __name__ == "__main__":
    print("\n\033[1;36;40mConfiguring Training Options....................\033[0m")
    # 0. Parse the configuration; `args` carries information between all stages below.
    args = set_train_configuration()

    # 1. Configure the computing environment: CPU, single GPU, or DataParallel mode.
    # TODO: DDP mode
    args.device, args.cuda = set_computing_env(args)

    # 2. Configure the dataset (split sizes, class names, annotation lines).
    args.input_shape, class_names, args.num_classes,  num_train, num_val, num_test, train_lines, val_lines, test_lines = set_dataset(args)
    # Iterations per epoch (drop_last=True below, so partial batches are discarded).
    num_train_iter      = num_train // args.bs
    num_val_iter  = num_val // args.bs

    train_dataset     = VOCDataset(train_lines, args.input_shape, train = True)
    val_dataset       = VOCDataset(val_lines, args.input_shape, train = False)

    # DataLoaders yield batches of (images, bboxes, labels).
    train_dataloader  = DataLoader(train_dataset, shuffle = True, batch_size = args.bs, num_workers = args.num_workers, pin_memory=args.cuda,
                                drop_last=True, collate_fn=box_label_dataset_collate, 
                                worker_init_fn=partial(worker_init_fn, rank=0, seed=args.seed))
    val_dataloader    = DataLoader(val_dataset  , shuffle = True, batch_size = args.bs, num_workers = args.num_workers, pin_memory=args.cuda, 
                                drop_last=True, collate_fn=box_label_dataset_collate, 
                                worker_init_fn=partial(worker_init_fn, rank=0, seed=args.seed))

    # 3. Configure the model.
    # Build and initialize the detector, its loss, the training step function
    # and the bbox decoder.
    args.detector, args.loss, trainer, decoder = set_detector(args)

    # Load detector weights (uses args.model_path etc. inside the helper).
    load_model_weights(args)

    # Move the model to the target device.
    args.model = args.detector.to(args.device)

    if args.cuda and len(args.gpu_id)>1:
        args.model = torch.nn.DataParallel(args.model, device_ids=args.gpu_id, output_device=args.gpu_id[0])
        print('\n\033[1;33;40mUsing DataParallel\033[0m')

    # 4. Configure the optimizer.
    optimizer = set_optimizer(args)

    # LambdaLR wraps the project's LR policy factory (warmup / cos / step).
    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda = set_lr_policy(args), last_epoch=-1)

    # 5. Configure recording, display and evaluation for the experiment.
    exp_dir = set_exp_dir(args)

    record_history    = HistoryRecord(exp_dir, args.detector, args.input_shape, args.record_type)

    eval_callback   = EvalCallback(args.detector, decoder, args.input_shape, class_names, args.num_classes, val_lines, exp_dir, args.cuda, \
                                    confidence=args.confidence, nms_iou = args. nms_iou, \
                                    eval_flag=args.eval_flag, period=args.eval_period)

    show_config(exp_dir, Experiment=exp_dir, Seed = args.seed, GPU = [gpu for gpu in args.gpu_id],\
                input_shape = args.input_shape, num_train = num_train, num_val = num_val, num_test = num_test, \
                Start_Epoch = args.start_epoch, Freeze_Epoch = args.freeze_epoch, Total_Epoch = args.total_epoch, \
                Batch_size = args.bs, Train_iters = num_train_iter, Val_iters = num_val_iter, \
                Init_lr = args.lr, optimizer_type = args.optimizer, momentum = args.momentum, Weight_decay = args.weight_decay, lr_policy = args.lr_policy, Warmup = args.warmup_epochs, \
                save_period = args.save_period, eval_period = args.eval_period, \
                fp16=args.fp16, resume_from = args.ckpt_path if args.resume else None, load_from = args.model_path
                )   

    # Resume: restore epoch counter, optimizer/scheduler state and the
    # recording/eval histories from the checkpoint dict saved below.
    if args.resume:
        print("\n\033[1;32;40mResume from Checkpoint: {}\033[0m".format(args.ckpt_path))
        checkpoint = torch.load(args.ckpt_path, map_location = args.device)
        args.start_epoch = checkpoint['epoch'] + 1
        # args.detector.load_state_dict(checkpoint['weights'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        scheduler.load_state_dict(checkpoint['scheduler'])
        record_history.ckpt_record = checkpoint['record']
        eval_callback.eval_map_record = checkpoint['eval_map']
        print("\n\033[1;32;40mStart Training from epoch {}\033[0m".format(args.start_epoch + 1))  

    # 6. Epoch-based training loop.
    # Mixed precision needs a GradScaler to avoid fp16 gradient underflow.
    if args.fp16 and args.cuda:
        from torch.cuda.amp import GradScaler as GradScaler
        scaler = GradScaler()
    else:
        scaler = None
    print("\n\033[1;36;40m====>   Start epoch training \033[0m")
    for epoch in range(args.start_epoch, args.total_epoch):
        # Freeze the backbone for the first `freeze_epoch` epochs, then unfreeze.
        if epoch < args.freeze_epoch:
            for param in args.detector.backbone.parameters():
                param.requires_grad = False
        else:
            for param in  args.detector.backbone.parameters():
                param.requires_grad = True
        
        losses = {}
        losses_all = {}
        train_loss_all = 0
        val_loss_all = 0

        print("\n\033[1;36;40mStart Train\033[0m")
        args.model.train()
        with tqdm(total=num_train_iter,desc=f'Epoch {epoch + 1}/{args.total_epoch}',postfix=dict,mininterval=0.3) as pbar:
            for iteration, batch in enumerate(train_dataloader):
                if iteration >= num_train_iter:
                    break
                images, bboxes, labels = batch[0], batch[1], batch[2]
                with torch.no_grad():
                    images = images.to(args.device)

                optimizer.zero_grad()

                if args.fp16 and args.cuda:
                    from torch.cuda.amp import autocast
                    with autocast():
                        losses =  trainer(args.model, args.loss, images, bboxes, labels )
                    scaler.scale(losses['train_loss']).backward()
                    scaler.step(optimizer)
                    scaler.update()
                else:

                    losses =  trainer(args.model, args.loss, images, bboxes, labels )

                    losses['train_loss'].backward()

                    optimizer.step()

                # Detach scalar values so the accumulation below holds floats,
                # not graph-holding tensors.
                for k in losses.keys():
                    losses[k] = losses[k].item()

                losses['lr'] = scheduler.get_last_lr()[0]

                # Show the current iteration's losses live in the progress bar.
                pbar.set_postfix(**losses) 
                pbar.update(1)                

                # Counter addition sums per-key losses across iterations.
                # NOTE(review): 'lr' is accumulated too and later divided by
                # num_train_iter, yielding the epoch's mean lr in the record.
                losses_all = dict(Counter(losses_all) + Counter(losses)) 
        # Loss accumulated over one epoch.
        train_loss_all = losses_all['train_loss']
        # Adjust the learning rate once per epoch.
        scheduler.step() 
        print('Finish Train')

        print("\n\033[1;36;40mStart Validation\033[0m")
        args.model.eval()
        with tqdm(total=num_val_iter, desc=f'Epoch {epoch + 1}/{args.total_epoch}',postfix=dict,mininterval=0.3) as pbar:
            for iteration, batch in enumerate(val_dataloader):
                if iteration >= num_val_iter:
                    break
                images, bboxes, labels = batch[0], batch[1], batch[2]
                
                # no_grad: validation computes losses without building a graph.
                with torch.no_grad():
                    images = images.to(args.device)

                    optimizer.zero_grad()

                    losses =  trainer(args.model, args.loss, images, bboxes, labels)

                    val_loss = losses['train_loss'].item()
         
                    pbar.set_postfix(**{'val_loss'  : val_loss})
                    pbar.update(1)
                
                val_loss_all += val_loss
        print('Finish Validation')
        print('Epoch:'+ str(epoch + 1) + '/' + str(args.total_epoch))
        # The values printed here are the per-iteration average loss over the epoch.
        print('Train Loss: %.3f || Val Loss: %.3f ' % (train_loss_all / num_train_iter, val_loss_all / num_val_iter))

        # Evaluate on the validation set during training to obtain metrics (mAP).
        eval_callback.on_epoch_end(epoch + 1)

        #-----------------------------------------------#
        #   Record this epoch's training statistics
        #-----------------------------------------------#       
        record = losses_all
        for k, v in record.items():
            record[k] = record[k] / num_train_iter

        record['val_loss'] = val_loss_all / num_val_iter
        
        record_history.append_record(epoch + 1, record)
    
        #-----------------------------------------------#
        #   Save a checkpoint holding the training state
        #   and the model weights
        #-----------------------------------------------#
        ckpt_dict = {
                    "epoch": epoch,
                    'weights': args.detector.state_dict(), 
                    "optimizer": optimizer.state_dict(),
                    "scheduler": scheduler.state_dict(),
                    "record": record_history.ckpt_record,
                    "eval_map": eval_callback.eval_map_record
                    }
        if (epoch + 1) % args.save_period == 0 or epoch + 1 == args.total_epoch:
            torch.save(ckpt_dict, os.path.join(exp_dir, 'ckpt_ep%03d-loss%.3f-val_loss%.3f.pth' % (epoch + 1, train_loss_all / num_train_iter, val_loss_all / num_val_iter)))
                
        # torch.save(args.detector.state_dict(), os.path.join(exp_dir, "last_epoch_weights.pth"))
        torch.save(ckpt_dict, os.path.join(exp_dir, "last_ckpt.pth"))

        #-----------------------------------------------#
        #   Save the model's best weights for reuse in
        #   other tasks; "best" = highest validation mAP
        #-----------------------------------------------#       
        # if len(record_history.ckpt_record['val_loss']) <= 1 or (val_loss_all / num_val_iter) <= min(record_history.ckpt_record['val_loss']): # alternative: pick the lowest val_loss as best
        if epoch % args.eval_period == 0 and args.eval_flag:
            if len(eval_callback.eval_map_record['eval_map']) ==0 :
                pass
            elif len(eval_callback.eval_map_record['eval_map']) ==1 or (eval_callback.eval_map_record['eval_map'][-1]) > max(eval_callback.eval_map_record['eval_map'][:-1]): # pick the highest eval_map as best
                print("\n\033[1;32;40mSave epoch {} as best_epoch_weights.pth\033[0m".format(epoch +  1))
                with open(os.path.join(exp_dir, "Mainconfig.txt"), 'a') as f:
                    f.write('Save best_epoch_weights at {} , best mAP is {}\n'.format((epoch +  1),(eval_callback.eval_map_record['eval_map'][-1])))
                torch.save(args.detector.state_dict(), os.path.join(exp_dir, "best_epoch_weights.pth"))

    record_history.writer.close()