# This is the main training file we are using
import os
import argparse
import random
import torch
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from PIL import Image
import numpy as np
import cv2

from datasets import Dataset4YoloAngle
from utils import MWtools, timer, visualization
import api



def parse_args():
    """Parse command-line options for the training script.

    Note: --high_resolution uses action='store_false', so high-resolution
    training (target 1024 / initial 1088) is the DEFAULT and passing the
    flag switches to the low-resolution setting (608 / 672).
    """
    def str2bool(v):
        # argparse's type=bool treats ANY non-empty string as True, so
        # '--cuda False' silently became True. Parse the text explicitly.
        if isinstance(v, bool):
            return v
        return v.lower() in ('true', '1', 'yes', 'y')

    parser = argparse.ArgumentParser()
    parser.add_argument('--model', type=str, default='rapid_pL1')
    parser.add_argument('--backbone', type=str, default='mobile2')
    parser.add_argument('--dataset', type=str, default='COCO')  # e.g. H1MWMK
    parser.add_argument('--batch_size', type=int, default=4)

    # store_false: high-resolution training is the default.
    parser.add_argument('--high_resolution', action='store_false')

    # Checkpoint file name under ./weights/ to resume from ('' = from scratch).
    parser.add_argument('--checkpoint', type=str, default='')

    parser.add_argument('--eval_interval', type=int, default=1000)
    parser.add_argument('--img_interval', type=int, default=500)
    parser.add_argument('--print_interval', type=int, default=1)
    parser.add_argument('--checkpoint_interval', type=int, default=2000)

    parser.add_argument('--debug', action='store_true')
    # Was type=bool, which made '--cuda False' evaluate to True.
    parser.add_argument('--cuda', type=str2bool, default=True)

    # store_false: backbone parameters are frozen by default. The training
    # code reads args.backbone_params_freeze; with this line commented out
    # (as it previously was) that access raised AttributeError.
    parser.add_argument('--backbone_params_freeze', action='store_false')

    return parser.parse_args()


if __name__ == '__main__':
    print('hello world')
    args = parse_args()
    assert args.cuda and torch.cuda.is_available() # Currently do not support CPU
    # -------------------------- settings ---------------------------
    # assert not args.adversarial
    # target_size: input size used for evaluation / sample detection.
    # initial_size: starting dataset size (multiscale resizing changes it later).
    target_size = 1024 if args.high_resolution else 608
    initial_size = 1088 if args.high_resolution else 672
    job_name = f'{args.model}_{args.backbone}_{args.dataset}{target_size}'
    # dataloader setting
    only_person = False if args.model == 'Hitachi80' else True
    print('Only train on person images and object:', only_person)
    batch_size = args.batch_size
    num_cpu = 0 if batch_size == 1 else 8
    # Gradient accumulation factor: effective batch = batch_size * subdivision = 128.
    subdivision = 128 // batch_size
    enable_aug = True
    multiscale = True # if (args.model != 'alpha_fc' and enable_aug) else False
    multiscale_interval = 10
    # SGD optimizer
    # Weight decay scaled by the effective batch size.
    decay_SGD = 0.0005 * batch_size * subdivision
    print(f'effective batch size = {batch_size} * {subdivision}')
    # dataset setting
    print('initialing dataloader...')
    if args.dataset == 'COCO':
        train_img_dir = '/home/sishaofeng/RAPID/data/COCO/train2017/'
        train_json = '/home/sishaofeng/RAPID/data/COCO/annotations/instances_train2017.json'
        #train_img_dir = './COCO/valone/'
        #train_json = './COCO/annotations/instances_valone.json'
        val_img_dir = '/home/sishaofeng/RAPID/data/COCO/val2017/'
        val_json = '/home/sishaofeng/RAPID/data/COCO/annotations/instances_val2017_new.json'
        # Looks batch-size dependent, but batch_size * subdivision == 128,
        # so the effective learning rate is constant.
        lr_SGD = 0.0001 / batch_size / subdivision
        # Learning rate setup
        def burnin_schedule(i):
            """LR multiplier for the COCO run: quadratic warm-up over the
            first 500 iterations, then a fixed step-decay staircase."""
            warmup = 500
            if i < warmup:
                return (i / warmup) ** 2
            for bound, factor in ((30000, 1.0), (40000, 0.5),
                                  (100000, 0.2), (300000, 0.1)):
                if i < bound:
                    return factor
            return 0.01
    elif args.dataset == 'MW':
        train_img_dir = '../../../MW18Mar/whole'
        train_json = '../../../MW18Mar/annotations/no19_nosmall.json'
        val_img_dir = '../../../COSSY/valJan/'
        val_json = '../../../COSSY/annotations/valJan.json'
        lr_SGD = 0.0001 / batch_size / subdivision
        # Learning rate setup
        def burnin_schedule(i):
            """Quadratic warm-up for 500 iterations, then step decay."""
            burn_in = 500
            if i < burn_in:
                factor = (i / burn_in) ** 2
            elif i < 10000:
                factor = 1.0
            elif i < 20000:
                factor = 0.3
            else:
                factor = 0.1
            return factor
    elif args.dataset == 'H1H2':
        videos = ['Meeting1', 'Meeting2', 'Lab2',
                  'Lunch1', 'Lunch2', 'Lunch3', 'Edge_cases', 'IRill', 'Activity','IRfilter']
        # if args.high_resolution:
        #     videos += ['All_off', 'IRfilter', 'IRill']
        train_img_dir = [f'./COSSY/{s}/' for s in videos]
        train_json = [f'./COSSY/annotations/{s}.json' for s in videos]
        val_img_dir = './COSSY/Lab1/'
        val_json = './COSSY/annotations/Lab1.json'
        lr_SGD = 0.0001 / batch_size / subdivision
        # Learning rate setup
        def burnin_schedule(i):
            """Quadratic warm-up for 500 iterations, then step decay."""
            burn_in = 500
            if i < burn_in:
                factor = (i / burn_in) ** 2
            elif i < 10000:
                factor = 1.0
            elif i < 20000:
                factor = 0.3
            else:
                factor = 0.1
            return factor
    elif args.dataset == 'H1MW':
        videos = ['Meeting1', 'Meeting2', 'Lab2', 'MW']
        train_img_dir = [f'./COSSY/{s}/' for s in videos]
        train_json = [f'./COSSY/annotations/{s}.json' for s in videos]
        val_img_dir = './COSSY/Lab1/'
        val_json = './COSSY/annotations/Lab1.json'
        lr_SGD = 0.0001 / batch_size / subdivision    # looks batch-size dependent, but subdivision = 128 // batch_size, so effective LR is constant
        # Learning rate setup
        def burnin_schedule(i):
            """Quadratic warm-up for 500 iterations, then step decay."""
            burn_in = 500
            if i < burn_in:
                factor = (i / burn_in) ** 2
            elif i < 10000:
                factor = 1.0
            elif i < 20000:
                factor = 0.3
            else:
                factor = 0.1
            return factor
    elif args.dataset == 'H2MW':
        videos = ['Lunch1', 'Lunch2', 'Edge_cases', 'IRill', 'Activity',
                  'MW']
        # if args.high_resolution:
        #     videos += ['All_off', 'IRfilter']
        train_img_dir = [f'../../../COSSY/{s}/' for s in videos]
        train_json = [f'../../../COSSY/annotations/{s}.json' for s in videos]
        val_img_dir = '../../../COSSY/Lunch3/'
        val_json = '../../../COSSY/annotations/Lunch3.json'
        lr_SGD = 0.0001 / batch_size / subdivision
        # Learning rate setup
        def burnin_schedule(i):
            """Quadratic warm-up for 500 iterations, then step decay."""
            burn_in = 500
            if i < burn_in:
                factor = (i / burn_in) ** 2
            elif i < 10000:
                factor = 1.0
            elif i < 20000:
                factor = 0.3
            else:
                factor = 0.1
            return factor
    elif args.dataset == 'H1MWMK_mix_angle':   # mixup plus angle-histogram augmentation
        videos = ['Activity', 'Edge_cases', 'IRill', 'MW','Market1_enhance','Market2_enhance',
                  'Lab2','Lunch2','Meeting1','Meeting2','IRfilter','Lab1',
                  'Lunch1','Lunch3']
        train_img_dir = [f'./COSSY_train_shao/{s}/' for s in videos]
        train_json = [f'./COSSY_train_shao/annotations/{s}.json' for s in videos]
        # Two validation sets: the full set, and the Market subset alone.
        val_img_dir_1 = './COSSY_val_new/total/'
        val_json_1 = './COSSY_val_new/annotations/total.json'
        val_img_dir_2 = './COSSY_val_new/Market/'
        val_json_2 = './COSSY_val_new/annotations/Market.json'
        # Note the 10x smaller base LR than the other branches.
        lr_SGD = 0.00001 / batch_size / subdivision  # looks batch-size dependent, but subdivision = 128 // batch_size, so effective LR is constant
        # Learning rate setup
        def burnin_schedule(i):
            """Quadratic warm-up for 500 iterations, then step decay."""
            burn_in = 500
            if i < burn_in:
                factor = (i / burn_in) ** 2
            elif i < 10000:
                factor = 1.0
            elif i < 20000:
                factor = 0.3
            else:
                factor = 0.1
            return factor
    elif args.dataset == 'H1MWMK':      # no augmentation
        videos = ['Activity', 'Edge_cases', 'IRill', 'MW','Market1','Market2',
                  'Lab2','Lunch2','Meeting1','Meeting2','IRfilter','Lab1',
                  'Lunch1','Lunch3']
        train_img_dir = [f'./COSSY_train_new/{s}/' for s in videos]
        train_json = [f'./COSSY_train_new/annotations/{s}.json' for s in videos]
        # Two validation sets: the full set, and the Market subset alone.
        val_img_dir_1 = './COSSY_val_new/total/'
        val_json_1 = './COSSY_val_new/annotations/total.json'
        val_img_dir_2 = './COSSY_val_new/Market/'
        val_json_2 = './COSSY_val_new/annotations/Market.json'
        lr_SGD = 0.0001 / batch_size / subdivision  # looks batch-size dependent, but subdivision = 128 // batch_size, so effective LR is constant
        # Learning rate setup
        def burnin_schedule(i):
            """Quadratic warm-up for 500 iterations, then step decay."""
            burn_in = 500
            if i < burn_in:
                factor = (i / burn_in) ** 2
            elif i < 10000:
                factor = 1.0
            elif i < 20000:
                factor = 0.3
            else:
                factor = 0.1
            return factor
    # Training dataset with rotated-bbox (angle) labels.
    dataset = Dataset4YoloAngle(train_img_dir, train_json, initial_size, enable_aug,
                                only_person=only_person, debug_mode=args.debug)
    dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True, 
                            num_workers=num_cpu, pin_memory=True, drop_last=False)
    dataiterator = iter(dataloader)
    
    # Model selection: RAPiD with periodic-L1 or periodic-L2 angle loss.
    if args.model == 'rapid_pL1':
        from models.rapid import RAPiD
        model = RAPiD(backbone=args.backbone, img_norm=False,
                       loss_angle='period_L1')
    elif args.model == 'rapid_pL2':
        from models.rapid import RAPiD
        model = RAPiD(backbone=args.backbone, img_norm=False,
                       loss_angle='period_L2')
    
    model = model.cuda() if args.cuda else model

    # Resume model weights from a checkpoint if one was given.
    start_iter = -1
    if args.checkpoint:
        print("loading ckpt...", args.checkpoint)
        weights_path = os.path.join('./weights/', args.checkpoint)
        state = torch.load(weights_path)
        model.load_state_dict(state['model_state_dict'])
        start_iter = state['iter']

    # NOTE(review): val_json_1 / val_json_2 are only defined by the H1MWMK*
    # dataset branches above — other datasets (e.g. COCO, MW) would raise
    # NameError here. Confirm which datasets are meant to be supported.
    val_set_1 = MWtools.MWeval(val_json_1, iou_method='rle')
    val_set_2 = MWtools.MWeval(val_json_2, iou_method='rle')
    eval_img_names = os.listdir('./images/')
    eval_img_paths = [os.path.join('./images/',s) for s in eval_img_names]
    #logger = SummaryWriter(f'./logs/{job_name}')      # create SummaryWriter instance
    train_logger = SummaryWriter(f'./logs/train/{job_name}')    # create SummaryWriter instance
    valid_logger_1 = SummaryWriter(f'./logs/valid_all/{job_name}')
    valid_logger_2 = SummaryWriter(f'./logs/valid_market/{job_name}')

    # optimizer setup
    backbone_params = list(map(id, model.backbone.parameters()))             # ids of the backbone parameters, used for filtering below
    detection_params = filter(lambda p: id(p) not in backbone_params,        # remaining (detection-head) parameters
                         model.parameters())
    #
    # NOTE(review): this requires parse_args to define --backbone_params_freeze
    # (its add_argument line is commented out above) — otherwise this access
    # raises AttributeError. Verify the argument exists.
    if args.backbone_params_freeze:      # freeze the backbone parameters during training
        for p in model.backbone.parameters():
            p.requires_grad = False
    if args.dataset in {'COCO', 'COCOTHH1MW'}:
        #optimizer = torch.optim.SGD(params, lr=lr_SGD, momentum=0.9, dampening=0,
        #                            weight_decay=decay_SGD)
        # Train jointly with per-group learning rates: the backbone group
        # gets 10x the detection-head LR.
        optimizer = torch.optim.SGD([{'params': detection_params},
                                     {'params': model.backbone.parameters(), 'lr': lr_SGD * 10}],    # backbone LR is 10x the detection LR
                                    lr=lr_SGD, momentum=0.9, dampening=0, weight_decay=decay_SGD)
    elif args.dataset in {'MW', 'H1H2', 'H1MW', 'H2MW', 'THEODORE', 'THH1MW',
                          'THH1H2', 'THH2MW'}:
        # NOTE(review): vacuous check — args.checkpoint defaults to '' and is
        # never None, so an empty checkpoint passes this assert.
        assert args.checkpoint is not None
        #optimizer = torch.optim.SGD(params, lr=lr_SGD)
        # NOTE(review): backbone_params holds id() integers, not parameter
        # tensors, so this param group looks wrong — probably meant
        # model.backbone.parameters(). Confirm before training these datasets.
        optimizer = torch.optim.SGD([{'params': detection_params},
                                     {'params': backbone_params}], lr=lr_SGD)
    elif args.dataset in {'H1MWMK','H1MWMK_mix_angle'}:
        # Backbone frozen: fine-tune only the parameters that still require grad.
        optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model.parameters()),
                                    lr=lr_SGD)
    else:
        raise NotImplementedError()

    # Resume the optimizer state only when the checkpoint filename contains
    # the current dataset name (i.e. it was trained on the same dataset).
    if args.dataset not in args.checkpoint:
        start_iter = -1
    else:
        optimizer.load_state_dict(state['optimizer_state_dict'])
        print(f'begin from iteration: {start_iter}')
    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, burnin_schedule, last_epoch=start_iter)# effective LR = base LR * burnin_schedule(iteration)

    # start training loop
    today = timer.today()
    start_time = timer.tic()
    for iter_i in range(start_iter, 150000):   # iteration budget (COCO pre-training scale)
        # evaluation
        # Evaluate periodically; skip iteration 0 when training on COCO.
        if iter_i % args.eval_interval == 0 and (args.dataset != 'COCO' or iter_i > 0):
        #if iter_i % args.eval_interval == 0 and (iter_i > 0):                #  non-COCO datasets
        #if iter_i % args.eval_interval == 0 and (args.dataset == 'COCO'):
            print('in evaluation')
            with timer.contexttimer() as t0:
                model.eval()
                # Run the detector over both validation image sets and score AP.
                model_eval = api.Detector(conf_thres=0.005, model=model)
                dts_1 = model_eval.detect_imgSeq(val_img_dir_1, input_size=target_size)
                dts_2 = model_eval.detect_imgSeq(val_img_dir_2, input_size=target_size)

                str_1 = val_set_1.evaluate_dtList(dts_1, metric='AP')
                str_2 = val_set_2.evaluate_dtList(dts_2, metric='AP')
            s1 = s2 = f'\nCurrent time: [ {timer.now()} ], iteration: [ {iter_i} ]\n\n'
            s1 += str_1 + '\n\n'
            s2 += str_2 + '\n\n'
            s1 += f'Validation elapsed time: [ {t0.time_str} ]'
            s2 += f'Validation elapsed time: [ {t0.time_str} ]'
            print(s1)
            print(s2)
            valid_logger_1.add_text('Validation summary', s1, iter_i)
            valid_logger_1.add_scalar('Validation AP[IoU=0.5]', val_set_1._getAP(0.5), iter_i)
            valid_logger_1.add_scalar('Validation AP[IoU=0.75]', val_set_1._getAP(0.75), iter_i)
            valid_logger_1.add_scalar('Validation AP[IoU=0.5:0.95]', val_set_1._getAP(), iter_i)

            valid_logger_2.add_text('Validation summary', s2, iter_i)
            valid_logger_2.add_scalar('Validation AP[IoU=0.5]', val_set_2._getAP(0.5), iter_i)
            valid_logger_2.add_scalar('Validation AP[IoU=0.75]', val_set_2._getAP(0.75), iter_i)
            valid_logger_2.add_scalar('Validation AP[IoU=0.5:0.95]', val_set_2._getAP(), iter_i)
            model.train()

        # subdivision loop
        # loop_times = subdivision if not args.adversarial else subdivision//2
        # Gradient accumulation: accumulate grads over `subdivision` batches,
        # then take a single optimizer step.
        optimizer.zero_grad()
        for inner_iter_i in range(subdivision):

            try:
                imgs, targets, cats, _, _ = next(dataiterator)  # load a batch
            except StopIteration:
                # Dataloader exhausted: restart it for a new epoch.
                dataiterator = iter(dataloader)
                imgs, targets, cats, _, _ = next(dataiterator)  # load a batch
            # visualization.imshow_tensor(imgs)
            imgs = imgs.cuda() if args.cuda else imgs
            # Forward pass returns total loss plus per-component losses
            # (indexed below as regression / angle / classification).
            loss,seperate_loss = model(imgs, targets, labels_cats=cats)
            print(str(inner_iter_i) + ',loss:' + str(float(loss)))
            loss.backward()
            # if args.adversarial:
            #     imgs = imgs + imgs.grad*0.05
            #     imgs = imgs.detach()
            #     # visualization.imshow_tensor(imgs)
            #     loss = model(imgs, targets)
            #     loss.backward()
        optimizer.step()
        scheduler.step()
        print('train over')
        # logging
        if iter_i % args.print_interval == 0:
            sec_used = timer.tic() - start_time
            time_used = timer.sec2str(sec_used)
            avg_iter = timer.sec2str(sec_used/(iter_i+1-start_iter))
            # NOTE(review): avg_iter appears to be a string returned by
            # sec2str; dividing it by batch_size would raise TypeError —
            # confirm timer.sec2str's return type.
            avg_epoch = avg_iter / batch_size / subdivision * 118287
            print(f'\nTotal time: {time_used}, iter: {avg_iter}, epoch: {avg_epoch}')
            '''
            backbone_lr = scheduler.get_last_lr()[1] * batch_size * subdivision   #>=pytorch 1.4.0
            detection_lr = scheduler.get_last_lr()[0] * batch_size * subdivision
            '''
            # Report the effective LR (scaled back up by the accumulation factor).
            current_lr = scheduler.get_last_lr()[0] * batch_size * subdivision       # pytorch 1.2.0
            print(f'[Iteration {iter_i}] [detection learning rate {current_lr:.3g}]',
                  f'[Total loss {float(loss):.2f}] [img size {dataset.img_size}]')
            print(model.loss_str)
            max_cuda = torch.cuda.max_memory_allocated(0) / 1024 / 1024 / 1024
            print(f'Max GPU memory usage: {max_cuda} GigaBytes')
            torch.cuda.reset_peak_memory_stats(0)
            train_logger.add_scalar('regression_loss', float(seperate_loss[0]), iter_i)
            train_logger.add_scalar('angle_loss',float(seperate_loss[1]),iter_i)
            train_logger.add_scalar('classification_loss', float(seperate_loss[2]), iter_i)
            train_logger.add_scalar('total_loss', float(loss), iter_i)
            train_logger.add_scalar('backbone_learning_rate',current_lr,iter_i)
            #train_logger.add_scalar('detection_learning_rate', detection_lr, iter_i)
        print('log over')
        # random resizing
        # Multiscale training: periodically pick a new input size (a multiple
        # of 32) and rebuild the dataloader so workers see the new size.
        if multiscale and iter_i > 0 and (iter_i % multiscale_interval == 0):
            if args.high_resolution:
                imgsize = random.randint(16, 34) * 32
            else:
                low = 10 if args.dataset == 'COCO' else 16
                imgsize = random.randint(low, 21) * 32
            dataset.img_size = imgsize
            dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True,
                                num_workers=num_cpu, pin_memory=True, drop_last=False)
            dataiterator = iter(dataloader)
        print('random resize over')
        # save checkpoint
        # Persist model + optimizer state so training can be resumed.
        if iter_i > 0 and (iter_i % args.checkpoint_interval == 0):
            state_dict = {
                'iter': iter_i,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
            }
            save_path = os.path.join('./weights', f'{job_name}_{today}_{iter_i}.ckpt')
            torch.save(state_dict, save_path)
        print('save checkpoint over')
        # save detection

        # Periodically run detection on the sample images in ./images/ and
        # log the rendered results to TensorBoard.
        if iter_i > 0 and iter_i % args.img_interval == 0:
            for img_path in eval_img_paths:
                eval_img = Image.open(img_path)
                dts = api.detect_once(model, eval_img, conf_thres=0.1, input_size=target_size)
                np_img = np.array(eval_img)
                visualization.draw_dt_on_np(np_img, dts)
                np_img = cv2.resize(np_img, (416,416))
                # cv2.imwrite(f'./results/eval_imgs/{job_name}_{today}_{iter_i}.jpg', np_img)
                valid_logger_1.add_image(img_path, np_img, iter_i, dataformats='HWC')

            model.train()
        print('save detection over')