import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

import torch
import torchvision.models.detection
import torchvision.models.detection.mask_rcnn
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import numpy as np
import utils
import transforms as T
from PIL import Image,ImageDraw,ImageFont
import itertools
import glob,time
from torchvision.models.detection.rpn import AnchorGenerator
import torch.jit
from torch import nn
import torch.nn.functional as F

def get_transform(train):
    """Build the image transform pipeline.

    Args:
        train (bool): when True, append a 0.5-probability random
            horizontal flip for data augmentation.

    Returns:
        T.Compose: tensor conversion, optionally followed by the flip.
    """
    ops = [T.ToTensor()]
    if train:
        ops.append(T.RandomHorizontalFlip(0.5))
    return T.Compose(ops)

class TwoMLPHead(nn.Module):
    """
    Two-layer fully connected head for FPN-based detection models.

    Spatial information is collapsed with average pooling (rather than
    flattening the feature map) before the two fully connected layers,
    so the head is independent of the RoI feature resolution.

    Arguments:
        in_channels (int): number of input channels
        representation_size (int): size of the intermediate representation
    """

    def __init__(self, in_channels=128, representation_size=1024):
        super().__init__()
        self.fc6 = nn.Linear(in_channels, representation_size)
        self.fc7 = nn.Linear(representation_size, representation_size)

    def forward(self, x):
        # Average-pool the trailing spatial dims (W then H) so fc6 sees a
        # (N, C) tensor; this replaces the usual flatten.
        pooled = x.mean(dim=3).mean(dim=2)
        hidden = F.relu(self.fc6(pooled))
        return F.relu(self.fc7(hidden))

class FastRCNNPredictor(nn.Module):
    """
    Classification-score and bounding-box-regression layers for Fast R-CNN.

    Arguments:
        in_channels (int): number of input channels
        num_classes (int): number of output classes (including background)
    """

    def __init__(self, in_channels, num_classes):
        super().__init__()
        self.cls_score = nn.Linear(in_channels, num_classes)
        self.bbox_pred = nn.Linear(in_channels, num_classes * 4)

    def forward(self, x):
        # A 4-D input must already be pooled down to a 1x1 spatial map.
        if x.dim() == 4:
            assert list(x.shape[2:]) == [1, 1]
        flat = x.flatten(start_dim=1)
        # Per-class scores and 4 box-regression deltas per class.
        return self.cls_score(flat), self.bbox_pred(flat)
    
def main(args):
    """Build the detection model, load trained weights, and export it
    as a TorchScript module.

    Args:
        args: parsed command-line namespace (see the argparse block at the
            bottom of this file). Only ``args.model``, ``args.device`` and
            ``args.pretrained`` influence the export; the training-related
            options exist for CLI parity with the training script.

    Side effects:
        Reads a checkpoint from a hard-coded path under ``model/`` and
        writes ``fasterrcnn_resnet50dcn_epoch6_20200425.pt`` to the
        current working directory.
    """
    utils.init_distributed_mode(args)
    print(args)

    device = torch.device(args.device)

    print("Loading data")

    # Built for parity with the training script; no dataset is needed for a
    # pure TorchScript export, so the transform itself goes unused here.
    transform = get_transform(False)
    num_classes = 2  # background + one foreground class

    print("Creating model")

    # With both set to None the torchvision factory builds its default box
    # head/predictor for `num_classes` classes. To export a model trained
    # with the custom heads above, construct them here instead, e.g.:
    #   box_head = TwoMLPHead(in_channels=256, representation_size=256)
    #   box_predictor = FastRCNNPredictor(in_channels=256, num_classes=num_classes)
    box_head = None
    box_predictor = None
    anchor_sizes = ((24,), (48,), (64,), (96,))
    aspect_ratios = ((1., 2.),) * len(anchor_sizes)  # ratios must be floats

    min_size = 128
    max_size = 2560
    rpn_anchor_generator = AnchorGenerator(anchor_sizes, aspect_ratios)
    # num_classes must be None whenever a custom box_predictor is supplied.
    model = torchvision.models.detection.__dict__[args.model](
        num_classes=None if box_predictor else num_classes,
        pretrained=args.pretrained,
        backbone_name="resnet50",
        # transform parameters (ImageNet normalization statistics)
        min_size=min_size, max_size=max_size,
        image_mean=(0.485, 0.456, 0.406),
        image_std=(0.229, 0.224, 0.225),

        # RPN parameters
        rpn_anchor_generator=rpn_anchor_generator, rpn_head=None,
        rpn_pre_nms_top_n_train=2000, rpn_pre_nms_top_n_test=1000,
        rpn_post_nms_top_n_train=2000,
        rpn_post_nms_top_n_test=1000,
        rpn_nms_thresh=0.7,
        rpn_fg_iou_thresh=0.7, rpn_bg_iou_thresh=0.3,
        rpn_batch_size_per_image=256, rpn_positive_fraction=0.5,

        # Box parameters
        box_roi_pool=None, box_head=box_head,
        box_predictor=box_predictor,
        box_score_thresh=0.05, box_nms_thresh=0.5,
        box_detections_per_img=100,
        box_fg_iou_thresh=0.5, box_bg_iou_thresh=0.5,
        box_batch_size_per_image=512, box_positive_fraction=0.25,
        bbox_reg_weights=None,
    )
    model.to(device)
    # Scripting must see eval mode so the detection model follows its
    # inference (not training) code path.
    model.eval()
    model_path = "model/20200425-082855_fasterrcnn_resnet50dcn_fpn_flatten_roifeature/fasterrcnn_resnetdcn_fpn_model_6.pth"
    # map_location keeps the load working even when the checkpoint was saved
    # on a device that is unavailable now (e.g. GPU checkpoint, CPU export).
    checkpoint = torch.load(model_path, map_location=device)
    model.load_state_dict(checkpoint['model'])

    traced_script_module = torch.jit.script(model)
    traced_script_module.save("fasterrcnn_resnet50dcn_epoch6_20200425.pt")


if __name__ == "__main__":
    import argparse

    # Command-line interface. Many of these options (lr, epochs, workers,
    # batch size, ...) mirror the companion training script; main() above
    # appears to consume only --model, --device and --pretrained for the
    # TorchScript export.
    parser = argparse.ArgumentParser(
        description=__doc__)

    parser.add_argument('--data-path', default='', help='dataset')
    parser.add_argument('--dataset', default='pascal_voc', help='dataset')
    parser.add_argument('--model', default='fasterrcnn_resnetdcn_fpn',
                        help='model, choice of [maskrcnn_resnet50_fpn, fasterrcnn_resnetdcn_fpn, fasterrcnn_resnet50_fpn]')
    parser.add_argument('--device', default='cuda', help='device')
    parser.add_argument('-b', '--batch-size', default=2, type=int,
                        help='images per gpu, the total batch size is $NGPU x batch_size')
    parser.add_argument('--epochs', default=26, type=int, metavar='N',
                        help='number of total epochs to run')
    parser.add_argument('-j', '--workers', default=0, type=int, metavar='N',
                        help='number of data loading workers (default: 4)')
    parser.add_argument('--lr', default=0.02, type=float,
                        help='initial learning rate, 0.02 is the default value for training '
                             'on 8 gpus and 2 images_per_gpu')
    parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                        help='momentum')
    parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
                        metavar='W', help='weight decay (default: 1e-4)',
                        dest='weight_decay')
    parser.add_argument('--lr-step-size', default=8, type=int, help='decrease lr every step-size epochs')
    parser.add_argument('--lr-steps', default=[16, 22], nargs='+', type=int, help='decrease lr every step-size epochs')
    parser.add_argument('--lr-gamma', default=0.1, type=float, help='decrease lr by a factor of lr-gamma')
    parser.add_argument('--print-freq', default=20, type=int, help='print frequency')
    parser.add_argument('--output-dir', default='.', help='path where to save')
    parser.add_argument('--resume', default='', help='resume from checkpoint')
    parser.add_argument('--start_epoch', default=0, type=int, help='start epoch')
    parser.add_argument('--aspect-ratio-group-factor', default=-1, type=int)
    parser.add_argument(
        "--test-only",
        dest="test_only",
        help="Only test the model",
        action="store_true",
    )
    # If pretrained is False, only the backbone's pretrained weights are
    # loaded; otherwise the whole pretrained faster_rcnn_resnet50_fpn
    # detection model is loaded.
    parser.add_argument(
        "--pretrained",
        default=False,
        dest="pretrained",
        help="Use pre-trained models from the modelzoo",
        action="store_true",
    )

    # distributed training parameters
    parser.add_argument('--world-size', default=1, type=int,
                        help='number of distributed processes')
    parser.add_argument('--dist-url', default='env://', help='url used to set up distributed training')

    args = parser.parse_args()



    main(args)