# python native-support
import argparse
import datetime
import json
import os
import shutil
import sys
import time

# pytorch
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
import torch.utils.tensorboard as tensorboard    
torch.set_printoptions(precision=2)

# others
import numpy as np
import cv2

# self
from data.retinaface.widerfacedataset import EnhancedRetinaWiderFaceDataset
from data.retinaface.dataaugmentation import TrainPreprocessor, EvalPreprocessor, detection_collate
from modules.retinaface.multiboxloss import MultiBoxLoss
from modules.retinaface.priorbox import PriorBox
from modules.retinaface.retinaface import RetinaFace
from modules.retinaface.trainer import RetinaFaceTrainer


if __name__ == "__main__":
    # Entry point: train a RetinaFace detector on the WIDER FACE dataset.
    # run: 'python -m trainval' in terminal

    parser = argparse.ArgumentParser(description='Retinaface Training')
    # NOTE(review): the misspelled flag names ('--network_backone',
    # '--annoations_*') are kept as-is for backward compatibility with
    # existing launch scripts.
    parser.add_argument('--run_name', type=str, default="retinaface_{0}".format(datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S")))
    parser.add_argument('--network_backone', type=str, default="mobilenetv1")
    parser.add_argument('--image_size', type=int, nargs=2, default=[256, 256])
    parser.add_argument('--gpu', type=int, default=0)
    parser.add_argument('--num_workers', type=int, default=4)
    parser.add_argument('--epochs', type=int, default=1000)
    parser.add_argument('--batch_size', type=int, default=32)
    parser.add_argument('--optimizer', type=str, default="adam")
    parser.add_argument('--learning_rate', type=float, default=1e-2)
    parser.add_argument('--momentum', type=float, default=0.9)
    parser.add_argument('--weight_decay', type=float, default=5e-4)
    parser.add_argument('--checkpoints_folder', type=str, default="./checkpoints")
    parser.add_argument('--imgs_train', type=str, default="./data/wider_face/WIDER_train/images")
    parser.add_argument('--imgs_val', type=str, default="./data/wider_face/WIDER_val/images")
    parser.add_argument('--annoations_train', type=str, default="./data/annotations/wider_face_train_retina_enhanced.json")
    parser.add_argument('--annoations_val', type=str, default="./data/annotations/wider_face_val_retina_enhanced.json")
    parser.add_argument('--annoations_val_easy', type=str, default="./data/annotations/wider_easy_val_offical.json")
    parser.add_argument('--annoations_val_medi', type=str, default="./data/annotations/wider_medi_val_offical.json")
    parser.add_argument('--annoations_val_hard', type=str, default="./data/annotations/wider_hard_val_offical.json")
    parser.add_argument('--normalize_mean', type=float, nargs=3, default=(0.4914, 0.4822, 0.4465))
    parser.add_argument('--normalize_std', type=float, nargs=3, default=(0.2023, 0.1994, 0.2010))
    parser.add_argument('--num_classes', type=int, default=2)
    parser.add_argument('--prior_if_clip', type=int, default=0)
    # NOTE(review): the nested-list default cannot be expressed from the
    # command line with nargs='*' (argparse would yield a flat float list);
    # only the default value is usable as-is — TODO confirm intended usage.
    parser.add_argument('--prior_min_sizes', type=float, nargs='*', default=[[7, 20], [32, 64], [112, 224]])
    parser.add_argument('--prior_steps', type=float, nargs='*', default=[8, 16, 32])
    parser.add_argument('--overlap_thresh', type=float, default=0.35)
    parser.add_argument('--neg_pos_ratio', type=float, default=7)
    parser.add_argument('--variance', type=float, nargs='*', default=[0.1, 0.2])
    parser.add_argument('--loss_loc_weight', type=float, default=2.0)
    # for ReduceLROnPlateau learning rate scheduler ..
    parser.add_argument('--scheduler_factor', type=float, default=1e-1)
    parser.add_argument('--scheduler_threshold', type=float, default=1e-5)
    parser.add_argument('--scheduler_patience', type=int, default=30)
    parser.add_argument('--scheduler_min_lr', type=float, default=1e-7)
    # for evaluation ..
    parser.add_argument('--confidence_threshold', type=float, default=0.5)
    parser.add_argument('--nms_iou_threshold', type=float, default=0.5)
    args = parser.parse_args()

    # Resolve the compute device and the per-run output folder.
    Device = "cuda:{0}".format(args.gpu) if torch.cuda.is_available() and args.gpu >= 0 else "cpu"
    RunFolder = os.path.join(args.checkpoints_folder, args.run_name)

    # Persist the full argument set so the run is reproducible.
    print(args)
    args_dict = vars(args)
    args_dict["run_folder"] = RunFolder
    if os.path.exists(RunFolder):
        # Wipe stale artifacts of a previous run with the same name.
        shutil.rmtree(RunFolder)
    # end-if
    os.makedirs(RunFolder)
    with open(os.path.join(RunFolder, 'args.json'), 'w') as fp:
        json.dump(args_dict, fp, indent=4, sort_keys=True)
    # end-with

    # Prepare the train/val dataset and loader ...
    image_hw = (args.image_size[0], args.image_size[1])

    def _build_loader(images_path, annotations_json, preproc, shuffle):
        """Build one WIDER FACE dataset + DataLoader pair (shared settings)."""
        dataset = EnhancedRetinaWiderFaceDataset(
            dataset_images_path=images_path,
            dataset_annotations_jsonfile=annotations_json,
            preproc=preproc
            )
        loader = torch.utils.data.DataLoader(
            dataset,
            args.batch_size,
            shuffle=shuffle,
            num_workers=args.num_workers,
            collate_fn=detection_collate
            )
        return loader
    # end-def

    trainloader = _build_loader(
        args.imgs_train, args.annoations_train,
        TrainPreprocessor(image_hw, args.normalize_mean, args.normalize_std),
        shuffle=True
        )
    eval_preproc = EvalPreprocessor(image_hw, args.normalize_mean, args.normalize_std)
    valloader = _build_loader(args.imgs_val, args.annoations_val, eval_preproc, shuffle=False)
    valloader_easy = _build_loader(args.imgs_val, args.annoations_val_easy, eval_preproc, shuffle=False)
    valloader_medi = _build_loader(args.imgs_val, args.annoations_val_medi, eval_preproc, shuffle=False)
    valloader_hard = _build_loader(args.imgs_val, args.annoations_val_hard, eval_preproc, shuffle=False)

    net = RetinaFace(args.network_backone)

    # Fall back to the canonical RetinaFace anchor configuration when the
    # CLI explicitly passes nothing (fix: these fallbacks were previously
    # computed but never handed to PriorBox).
    prior_min_sizes = args.prior_min_sizes if args.prior_min_sizes is not None else [[16, 32], [64, 128], [256, 512]]
    prior_steps = args.prior_steps if args.prior_steps is not None else [8, 16, 32]
    priorbox = PriorBox(
        min_sizes=prior_min_sizes,
        steps=prior_steps,
        clip=args.prior_if_clip,
        image_size=image_hw
        )
    # Anchors are fixed for a given image size; no gradients needed.
    with torch.no_grad():
        priors = priorbox.forward()
        priors = priors.to(Device)
    # end-with

    # set optimizer (build only the one requested; previously an SGD
    # optimizer was always constructed and then discarded for 'adam')
    if args.optimizer == 'adam':
        optimizer = torch.optim.Adam(
            net.parameters(),
            lr=args.learning_rate,
            weight_decay=args.weight_decay
            )
    else:
        optimizer = torch.optim.SGD(
            net.parameters(),
            lr=args.learning_rate,
            momentum=args.momentum,
            weight_decay=args.weight_decay
            )
    # end-if

    # set loss
    criterion = MultiBoxLoss(
        num_classes=args.num_classes,
        overlap_thresh=args.overlap_thresh,
        neg_pos_ratio=args.neg_pos_ratio,
        variance=args.variance,
        device=Device
        )

    # set learning rate scheduler ...
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer,
        factor=args.scheduler_factor,
        threshold=args.scheduler_threshold,
        patience=args.scheduler_patience,
        min_lr=args.scheduler_min_lr
        )

    # set tensorboard writer ...
    tbx_writer = tensorboard.SummaryWriter(log_dir=os.path.join(RunFolder))

    # Assemble the trainer and run the full train/eval loop.
    trainer = RetinaFaceTrainer(
        train_loader = trainloader,
        val_loader = valloader,
        eval_loader = valloader_hard,
        network = net,
        criterion = criterion,
        optimizer = optimizer,
        scheduler = scheduler,
        epochs = args.epochs,
        device = Device,
        tbx_writer = tbx_writer,
        checkpoints_folder = RunFolder,
        priors=priors,
        loc_weight=args.loss_loc_weight
        )
    # NOTE(review): reaches into a private attribute and switches to eval
    # mode right before training — presumably the trainer flips back to
    # train mode internally; confirm against RetinaFaceTrainer.
    trainer._network.eval()
    trainer(
        stop_lr = 1e-6,
        box_priors = priors,
        evalloader = valloader,
        confidence_threshold = args.confidence_threshold,
        iou_threshold = args.nms_iou_threshold,  # fix: was hard-coded 0.5, ignoring the CLI flag
        if_show = False
        )
    print("End train.")


