r"""PyTorch Detection Training.

To run in a multi-gpu environment, use the distributed launcher::

    python -m torch.distributed.launch --nproc_per_node=$NGPU --use_env \
        train.py ... --world-size $NGPU

The default hyperparameters are tuned for training on 8 gpus and 2 images per gpu.
    --lr 0.02 --batch-size 2 --world-size 8
If you use a different number of GPUs, the learning rate should be changed to 0.02/8*$NGPU.

On top of that, for training Faster/Mask R-CNN, the default hyperparameters are
    --epochs 26 --lr-steps 16 22 --aspect-ratio-group-factor 3

Also, if you train Keypoint R-CNN, the default hyperparameters are
    --epochs 46 --lr-steps 36 43 --aspect-ratio-group-factor 3
Because the number of images is smaller in the person keypoint subset of COCO,
the number of epochs should be adapted so that we have the same number of iterations.
"""
import datetime
import os
os.environ["CUDA_VISIBLE_DEVICES"]="0"
import time

import torch
import torch.utils.data
from torch import nn
import torchvision
import torchvision.models.detection
import torchvision.models.detection.mask_rcnn
import albumentations as albu
import CharacterLoader
from group_by_aspect_ratio import GroupedBatchSampler, create_aspect_ratio_groups
from engine import train_one_epoch
import cv2
import utils
import transforms as T
import numpy as np
import torch.nn.functional as F
from torchvision.models.detection.rpn import AnchorGenerator
torch.cuda.empty_cache()


def get_transform(train):
    """Build the image preprocessing pipeline.

    Always converts images to tensors; when ``train`` is True, additionally
    applies a random horizontal flip with probability 0.5.
    """
    steps = [T.ToTensor()]
    if train:
        steps.append(T.RandomHorizontalFlip(0.5))
    return T.Compose(steps)


class TwoMLPHead(nn.Module):
    """Two fully-connected layers used as a box head for FPN-based models.

    Arguments:
        in_channels (int): number of input channels
        representation_size (int): size of the intermediate representation
    """

    def __init__(self, in_channels=128, representation_size=1024):
        super(TwoMLPHead, self).__init__()
        # Attribute names fc6/fc7 follow the torchvision convention so
        # state_dicts remain compatible.
        self.fc6 = nn.Linear(in_channels, representation_size)
        self.fc7 = nn.Linear(representation_size, representation_size)

    def forward(self, x):
        # Global mean pooling over the two spatial dimensions (last dim,
        # then the new last dim) instead of flattening, so the FC input
        # size does not depend on the RoI feature resolution.
        pooled = x.mean(dim=3)
        pooled = pooled.mean(2)
        hidden = F.relu(self.fc6(pooled))
        return F.relu(self.fc7(hidden))

class FastRCNNPredictor(nn.Module):
    """
    Standard classification + bounding box regression layers
    for Fast R-CNN.

    Arguments:
        in_channels (int): number of input channels
        num_classes (int): number of output classes (including background)
    """

    def __init__(self, in_channels, num_classes):
        super(FastRCNNPredictor, self).__init__()
        # One score per class, and 4 box-regression deltas per class.
        self.cls_score = nn.Linear(in_channels, num_classes)
        self.bbox_pred = nn.Linear(in_channels, num_classes * 4)

    def forward(self, x):
        # A 4-D input must already be pooled down to a 1x1 spatial map.
        if x.dim() == 4:
            assert list(x.shape[2:]) == [1, 1]
        flat = x.flatten(start_dim=1)
        return self.cls_score(flat), self.bbox_pred(flat)


def main(args):
    """Train a detection model.

    Builds the dataset, data loader, model, optimizer and LR scheduler from
    *args*, optionally resumes from a checkpoint, then runs the training
    loop, saving a checkpoint after every epoch under
    ``args.output_dir/<timestamp>_<model>/``.
    """
    utils.init_distributed_mode(args)
    print(args)

    device = torch.device(args.device)

    # Data loading code
    print("Loading data")

    transform = get_transform(True)
    # NOTE(review): the dataset path is hard-coded; args.data_path is ignored.
    # num_classes = 2 means one foreground class plus background.
    dataset = CharacterLoader.CharacterDataset(
        data_dir="/media/retoo/RetooDisk1/wanghui/Data/character/0330字符检测数据集/train/",
        split="train",
        transforms=transform)
    num_classes = 2

    print("Creating data loaders")
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(dataset)
    else:
        train_sampler = torch.utils.data.RandomSampler(dataset)

    # Bug fix: previously the sampler was built but never passed to the
    # DataLoader (which used shuffle=True instead), so distributed training
    # did not shard the data, set_epoch() had no effect, and the
    # aspect-ratio grouping was dead code. The single-GPU default behavior
    # is unchanged: sampler=RandomSampler is equivalent to shuffle=True.
    if args.aspect_ratio_group_factor >= 0:
        group_ids = create_aspect_ratio_groups(dataset, k=args.aspect_ratio_group_factor)
        train_batch_sampler = GroupedBatchSampler(train_sampler, group_ids, args.batch_size)
        data_loader = torch.utils.data.DataLoader(dataset,
                                                  batch_sampler=train_batch_sampler,
                                                  num_workers=args.workers,
                                                  collate_fn=utils.collate_fn)
    else:
        data_loader = torch.utils.data.DataLoader(dataset,
                                                  batch_size=args.batch_size,
                                                  sampler=train_sampler,
                                                  num_workers=args.workers,
                                                  collate_fn=utils.collate_fn)

    # Anchor configuration for the resnet backbone: one anchor size per FPN
    # level, with 1:1 and 2:1 aspect ratios (ratios must be floats).
    anchor_sizes = ((8,), (16,), (32,), (64,))
    aspect_ratios = ((1., 2.),) * len(anchor_sizes)

    min_size = 128
    max_size = 2560
    rpn_anchor_generator = AnchorGenerator(anchor_sizes, aspect_ratios)

    # Set these to TwoMLPHead(...) / FastRCNNPredictor(...) above to use the
    # custom heads; None means torchvision builds its defaults internally.
    box_head = None
    box_predictor = None

    print("Creating model")
    model = torchvision.models.detection.__dict__[args.model](
        # num_classes must be None when a custom box_predictor is supplied.
        num_classes=None if box_predictor else num_classes,
        backbone_name="resnet50",
        pretrained=args.pretrained,

        # transform parameters
        min_size=min_size, max_size=max_size,
        image_mean=(0.485, 0.456, 0.406), image_std=(0.229, 0.224, 0.225),

        # RPN parameters
        rpn_anchor_generator=rpn_anchor_generator, rpn_head=None,
        rpn_pre_nms_top_n_train=2000, rpn_pre_nms_top_n_test=1000,
        rpn_post_nms_top_n_train=2000,
        rpn_post_nms_top_n_test=1000,
        rpn_nms_thresh=0.7,
        rpn_fg_iou_thresh=0.7, rpn_bg_iou_thresh=0.3,
        rpn_batch_size_per_image=256, rpn_positive_fraction=0.5,

        # Box parameters
        box_roi_pool=None, box_head=box_head, box_predictor=box_predictor,
        box_score_thresh=0.05, box_nms_thresh=0.5,
        box_detections_per_img=100,
        box_fg_iou_thresh=0.5, box_bg_iou_thresh=0.5,
        box_batch_size_per_image=512, box_positive_fraction=0.25,
        bbox_reg_weights=None)
    model.to(device)

    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module
    else:
        model_without_ddp = model

    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(params, lr=args.lr, momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # Exponential decay each epoch. NOTE(review): args.lr_step_size,
    # args.lr_steps and args.lr_gamma are unused with this scheduler.
    lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.7)

    if args.resume:
        checkpoint = torch.load(args.resume, map_location='cpu')
        model_without_ddp.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
        args.start_epoch = checkpoint['epoch'] + 1

    print("Start training")
    subdir = datetime.datetime.strftime(datetime.datetime.now(), '%Y%m%d-%H%M%S')
    save_path = args.output_dir + "/%s_%s" % (subdir, args.model)
    os.makedirs(save_path, exist_ok=True)
    start_time = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        train_one_epoch(model, optimizer, data_loader, device, epoch, args.print_freq)
        # Bug fix: passing the epoch to step() is deprecated; the plain
        # step() gives the same gamma**n decay, and resumed runs continue
        # correctly from the loaded scheduler state.
        lr_scheduler.step()
        if args.output_dir:
            utils.save_on_master({
                'model': model_without_ddp.state_dict(),
                'optimizer': optimizer.state_dict(),
                'lr_scheduler': lr_scheduler.state_dict(),
                'args': args,
                'epoch': epoch},
                os.path.join(save_path, '{}_model_{}.pth'.format(args.model, epoch)))

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))


if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description=__doc__)

    parser.add_argument('--data-path', default='', help='dataset')
    parser.add_argument('--dataset', default='pascal_voc', help='dataset')
    parser.add_argument('--model', default='fasterrcnn_resnet50_fpn',
                        help='model, choice of [fasterrcnn_resnet50_fpn, fasterrcnn_mobilenetv2_fpn,'
                             'fasterrcnn_shufflenet_fpn,fasterrcnn_resnetdcn_fpn]')
    parser.add_argument('--device', default='cuda', help='device')
    parser.add_argument('-b', '--batch-size', default=12, type=int,
                        help='images per gpu, the total batch size is $NGPU x batch_size')
    parser.add_argument('--epochs', default=8, type=int, metavar='N',
                        help='number of total epochs to run')
    # Bug fix: help text claimed "default: 4" while the actual default is 0.
    parser.add_argument('-j', '--workers', default=0, type=int, metavar='N',
                        help='number of data loading workers (default: 0)')
    # Bug fix: help text implied 0.02 is the default; the actual default is 0.005.
    parser.add_argument('--lr', default=0.005, type=float,
                        help='initial learning rate (default: 0.005); 0.02 is the reference '
                        'value for training on 8 gpus and 2 images_per_gpu')
    parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                        help='momentum')
    parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
                        metavar='W', help='weight decay (default: 1e-4)',
                        dest='weight_decay')
    parser.add_argument('--lr-step-size', default=6, type=int, help='decrease lr every step-size epochs')
    # Bug fix: help text was copy-pasted from --lr-step-size.
    parser.add_argument('--lr-steps', default=[2, 4], nargs='+', type=int,
                        help='decrease lr at these epoch milestones')
    parser.add_argument('--lr-gamma', default=0.1, type=float, help='decrease lr by a factor of lr-gamma')
    parser.add_argument('--print-freq', default=50, type=int, help='print frequency')
    parser.add_argument('--output-dir', default='./model/', help='path where to save')
    parser.add_argument('--resume',
                        default='',
                        help='resume from checkpoint')
    parser.add_argument('--start_epoch', default=0, type=int, help='start epoch')
    parser.add_argument('--aspect-ratio-group-factor', default=-1, type=int)
    parser.add_argument(
        "--test-only",
        dest="test_only",
        help="Only test the model",
        action="store_true",
    )
    # Without --pretrained, only the backbone's pre-trained weights are loaded;
    # with --pretrained, the full faster_rcnn_resnet50_fpn detection model's
    # pre-trained weights are loaded.
    parser.add_argument(
        "--pretrained",
        default=False,
        dest="pretrained",
        help="Use pre-trained models from the modelzoo",
        action="store_true",
    )

    # distributed training parameters
    parser.add_argument('--world-size', default=1, type=int,
                        help='number of distributed processes')
    parser.add_argument('--dist-url', default='env://', help='url used to set up distributed training')

    args = parser.parse_args()

    if args.output_dir:
        utils.mkdir(args.output_dir)

    main(args)
