r"""PyTorch Detection Training.

To run in a multi-gpu environment, use the distributed launcher::

    python -m torch.distributed.launch --nproc_per_node=$NGPU --use_env \
        train.py ... --world-size $NGPU

The default hyperparameters are tuned for training on 8 gpus and 2 images per gpu.
    --lr 0.02 --batch-size 2 --world-size 8
If you use different number of gpus, the learning rate should be changed to 0.02/8*$NGPU.

On top of that, for training Faster/Mask R-CNN, the default hyperparameters are
    --epochs 26 --lr-steps 16 22 --aspect-ratio-group-factor 3

Also, if you train Keypoint R-CNN, the default hyperparameters are
    --epochs 46 --lr-steps 36 43 --aspect-ratio-group-factor 3
Because the number of images is smaller in the person keypoint subset of COCO,
the number of epochs should be adapted so that we have the same number of iterations.
"""
import datetime
import os
import time
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
import torch
import torch.utils.data
from torch import nn
import torchvision
import torchvision.models.detection
import torchvision.models.detection.mask_rcnn
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import FoodLoader
from group_by_aspect_ratio import GroupedBatchSampler, create_aspect_ratio_groups
from engine import train_one_epoch
import numpy as np
from collections import OrderedDict
from torchvision.ops import misc as misc_nn_ops
import utils
import transforms as T
from PIL import Image,ImageDraw,ImageFont
import itertools
import glob,time
from torchvision.models.detection.rpn import AnchorGenerator
import itertools
import torch.nn.functional as F
import random

def get_transform(train):
    """Build the image transform pipeline; flips are added only for training."""
    ops = [T.ToTensor()]
    if train:
        ops.append(T.RandomHorizontalFlip(0.5))
    return T.Compose(ops)

class TwoMLPHead(nn.Module):
    """
    Standard heads for FPN-based models

    Arguments:
        in_channels (int): number of input channels
        representation_size (int): size of the intermediate representation
    """

    def __init__(self, in_channels=128, representation_size=1024):
        super(TwoMLPHead, self).__init__()
        # Two fully-connected stages applied after spatial pooling.
        self.fc6 = nn.Linear(in_channels, representation_size)
        self.fc7 = nn.Linear(representation_size, representation_size)

    def forward(self, x):
        # Collapse the spatial (H, W) axes by averaging — mean pooling is
        # used here instead of the usual flatten.
        x = x.mean(dim=3).mean(2)
        for fc in (self.fc6, self.fc7):
            x = F.relu(fc(x))
        return x

class FastRCNNPredictor(nn.Module):
    """
    Standard classification + bounding box regression layers
    for Fast R-CNN.

    Arguments:
        in_channels (int): number of input channels
        num_classes (int): number of output classes (including background)
    """

    def __init__(self, in_channels, num_classes):
        super(FastRCNNPredictor, self).__init__()
        # One linear head for class scores, one for per-class box deltas.
        self.cls_score = nn.Linear(in_channels, num_classes)
        self.bbox_pred = nn.Linear(in_channels, num_classes * 4)

    def forward(self, x):
        # RoI features may still carry a trailing 1x1 spatial extent.
        if x.dim() == 4:
            assert list(x.shape[2:]) == [1, 1]
        flat = x.flatten(start_dim=1)
        return self.cls_score(flat), self.bbox_pred(flat)

# for mask
class MaskRCNNHeads(nn.Sequential):
    def __init__(self, in_channels, layers, dilation):
        """
        Stack of 3x3 conv + ReLU blocks forming the mask-branch trunk.

        Arguments:
            in_channels (int): channels of the incoming feature map
            layers (tuple of int): output channels of each conv layer
            dilation (int): dilation (and matching padding) for every conv
        """
        modules = OrderedDict()
        prev_channels = in_channels
        for idx, out_channels in enumerate(layers, 1):
            modules["mask_fcn{}".format(idx)] = misc_nn_ops.Conv2d(
                prev_channels, out_channels, kernel_size=3,
                stride=1, padding=dilation, dilation=dilation)
            modules["relu{}".format(idx)] = nn.ReLU(inplace=True)
            prev_channels = out_channels

        super(MaskRCNNHeads, self).__init__(modules)
        # Kaiming-initialise conv weights; biases keep their default init.
        for name, param in self.named_parameters():
            if "weight" in name:
                nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu")

class MaskRCNNPredictor(nn.Sequential):
    """Upsample-then-predict head producing per-class mask logits."""

    def __init__(self, in_channels, dim_reduced, num_classes):
        layers = OrderedDict([
            ("conv5_mask", misc_nn_ops.ConvTranspose2d(in_channels, dim_reduced, 2, 2, 0)),
            ("relu", nn.ReLU(inplace=True)),
            ("mask_fcn_logits", misc_nn_ops.Conv2d(dim_reduced, num_classes, 1, 1, 0)),
        ])
        super(MaskRCNNPredictor, self).__init__(layers)

        # Kaiming-initialise weights only; biases keep their default init.
        for name, param in self.named_parameters():
            if "weight" in name:
                nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu")

#for keypoint
class KeypointRCNNHeads(nn.Sequential):
    """Stack of 3x3 conv + ReLU blocks forming the keypoint-branch trunk."""

    def __init__(self, in_channels, layers):
        blocks = []
        prev = in_channels
        for width in layers:
            blocks.append(misc_nn_ops.Conv2d(prev, width, 3, stride=1, padding=1))
            blocks.append(nn.ReLU(inplace=True))
            prev = width
        super(KeypointRCNNHeads, self).__init__(*blocks)
        # Kaiming weights / zero biases on every conv layer.
        for module in self.children():
            if isinstance(module, misc_nn_ops.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
                nn.init.constant_(module.bias, 0)

class KeypointRCNNPredictor(nn.Module):
    """Deconv + bilinear-upsample head producing per-keypoint heatmaps."""

    def __init__(self, in_channels, num_keypoints):
        super(KeypointRCNNPredictor, self).__init__()
        deconv_kernel = 4
        # Learned 2x upsampling from RoI features to keypoint logits.
        self.kps_score_lowres = misc_nn_ops.ConvTranspose2d(
            in_channels,
            num_keypoints,
            deconv_kernel,
            stride=2,
            padding=deconv_kernel // 2 - 1,
        )
        nn.init.kaiming_normal_(self.kps_score_lowres.weight, mode="fan_out", nonlinearity="relu")
        nn.init.constant_(self.kps_score_lowres.bias, 0)
        self.up_scale = 2
        self.out_channels = num_keypoints

    def forward(self, x):
        # Low-resolution logits followed by a fixed 2x bilinear upsample.
        lowres = self.kps_score_lowres(x)
        return misc_nn_ops.interpolate(
            lowres, scale_factor=float(self.up_scale), mode="bilinear", align_corners=False)

def random_colour_masks(image):
    """Colourise a binary mask with one randomly chosen palette colour.

    Arguments:
        image: 2-D array whose foreground pixels equal 1.

    Returns:
        An (H, W, 3) uint8 array where foreground pixels carry the chosen
        RGB colour and background pixels are black.
    """
    colours = [[0, 255, 0], [0, 0, 255], [255, 0, 0], [0, 255, 255],
               [255, 255, 0], [255, 0, 255], [80, 70, 180], [250, 80, 190],
               [245, 145, 50], [70, 150, 250], [50, 190, 190]]
    r = np.zeros_like(image).astype(np.uint8)
    g = np.zeros_like(image).astype(np.uint8)
    b = np.zeros_like(image).astype(np.uint8)
    # Bug fix: the original random.randrange(0, 10) could never select the
    # last palette entry (index 10); random.choice draws uniformly from all.
    r[image == 1], g[image == 1], b[image == 1] = random.choice(colours)
    coloured_mask = np.stack([r, g, b], axis=2)
    return coloured_mask


def main(args):
    """Run Mask R-CNN inference over a folder of images and visualise detections.

    Builds a maskrcnn_resnet50_fpn with custom box and mask heads, loads a
    hard-coded checkpoint, then for every image matched by the glob below
    draws high-confidence boxes and scores (plus a mask overlay when masks
    are present) with PIL and shows the result via matplotlib. Returns
    nothing; saving to disk is commented out.
    """
    utils.init_distributed_mode(args)
    print(args)

    device = torch.device(args.device)

    # Data loading code
    print("Loading data")

    # dataset, num_classes = get_dataset(args.dataset, "train", get_transform(train=True), args.data_path)
    # dataset_test, _ = get_dataset(args.dataset, "val", get_transform(train=False), args.data_path)
    # Inference-time transform: ToTensor only, no augmentation.
    transform = get_transform(False)
    # dataset_test = CharacterLoader.CharacterDataset(data_dir="/NetDisk/SSD/SSD-研发部/项目工作目录/OCR项目/拍摄的数据集/邦纳提供图片整理/小图/**/*.jpg",
    #                                                 split="test",
    #                                                 transforms=transform)
    num_classes = 2  # background + one foreground class

    print("Creating model")

    # for resnet
    anchor_sizes = ((48,), (96,), (168,), (256,))
    aspect_ratios = ((0.5, 1., 2.),) * len(anchor_sizes)  # must be floats

    #for mobile net
    # anchor_sizes = ((48, 128, 256))
    # aspect_ratios = ((0.5, 1., 2.),) * len(anchor_sizes)  # must be floats
    min_size = 512
    max_size = 1200
    box_score_thresh = 0.5

    # Custom heads sized for the 256-channel FPN feature maps.
    box_head = TwoMLPHead(in_channels=256, representation_size=256)
    box_predictor = FastRCNNPredictor(in_channels=256, num_classes=num_classes)

    mask_head = MaskRCNNHeads(in_channels=256,layers=(256, 256, 256, 256), dilation=1)
    mask_predictor = MaskRCNNPredictor(in_channels=256, dim_reduced=256,num_classes=num_classes)
    # box_head = None
    # box_predictor = None

    rpn_anchor_generator = AnchorGenerator(anchor_sizes, aspect_ratios)
    # num_classes must be None when a custom box_predictor is supplied.
    model = torchvision.models.detection.__dict__[args.model](num_classes=None if box_predictor else num_classes,
                                                              backbone_name="resnet50",
                                                              pretrained=args.pretrained,
                                                              ##**kwargs
                                                              min_size=min_size, max_size=max_size,
                                                              image_mean=(0.485, 0.456, 0.406),
                                                              image_std=(0.229, 0.224, 0.225),

                                                              # RPN parameters
                                                              rpn_anchor_generator=rpn_anchor_generator, rpn_head=None,
                                                              rpn_pre_nms_top_n_train=2000, rpn_pre_nms_top_n_test=1000,
                                                              rpn_post_nms_top_n_train=2000,
                                                              rpn_post_nms_top_n_test=1000,
                                                              rpn_nms_thresh=0.7,
                                                              rpn_fg_iou_thresh=0.7, rpn_bg_iou_thresh=0.3,
                                                              rpn_batch_size_per_image=256, rpn_positive_fraction=0.5,

                                                              # Box parameters
                                                              box_roi_pool=None, box_head=box_head, box_predictor=box_predictor,
                                                              box_score_thresh=box_score_thresh, box_nms_thresh=0.5,
                                                              box_detections_per_img=100,
                                                              box_fg_iou_thresh=0.5, box_bg_iou_thresh=0.5,
                                                              box_batch_size_per_image=512, box_positive_fraction=0.25,
                                                              bbox_reg_weights=None,

                                                              # Mask parameters
                                                              mask_roi_pool=None, mask_head=mask_head, mask_predictor=mask_predictor,

                                                              # keypoint parameters
                                                              # keypoint_roi_pool=None, keypoint_head=None,
                                                              # keypoint_predictor=None,num_keypoints=17
                                                              )
    model.to(device)
    model.eval()  # inference mode: model(data) returns detections, not losses

    # NOTE(review): checkpoint path is hard-coded — args.resume is ignored here.
    checkpoint = torch.load("model/2020-06-30-14-47-48/maskrcnn_resnet50_fpn_model_11.pth")
    model.load_state_dict(checkpoint['model'])
    # Endless colour cycle so consecutive boxes get different outline colours.
    colors = itertools.cycle(["red", "green", "blue", "yellow", "cyan", "gold", "purple", "violet", "pink"])
    with torch.no_grad():
        # for path in glob.glob("/media/retoo/RetooDisk/wanghui/Data/Food/test/牛肉@12元/*.jpg"):
        for path in glob.glob("/media/retoo/RetooDisk/wanghui/Data/Food/0624_U2NET_TEST_DATA/JPEGImages/*.jpg",recursive=True)[3:]:
        # for path in glob.glob("/NetDisk/SSD/SSD-研发部/项目工作目录/菜品识别项目/蒸浏记/Pic/原始图像_训练_测试用_除copy外不要进行任何其他操作/小碗菜/泽西城小碗1217/*.jpg",recursive=True):
        # for path in glob.glob("/NetDisk/SSD/SSD-研发部/项目工作目录/菜品识别项目/蒸浏记/Pic/原始图像_训练_测试用_除copy外不要进行任何其他操作/20200111_第29批_泽西城门店43010132_1224-0110测试汇总/*.jpg", recursive=True):
        # for path in glob.glob("/NetDisk/SSD/SSD-研发部/项目工作目录/菜品识别项目/蒸浏记/Pic/原始图像_训练_测试用_除copy外不要进行任何其他操作/大铁锅/*.jpg",recursive=True):
        # for path in glob.glob("/NetDisk/SSD/SSD-研发部/项目工作目录/菜品识别项目/蒸浏记/Pic/原始图像_训练_测试用_除copy外不要进行任何其他操作/20191223_第28批_九峰小区门店43010146_1217-1223测试汇总/*.jpg",recursive=True):
        # for path in glob.glob("/NetDisk/SSD/SSD-研发部/项目工作目录/菜品识别项目/蒸浏记/Pic/原始图像_训练_测试用_除copy外不要进行任何其他操作/20191230_泽西城/**/*.jpg",recursive=True):
        # for path in glob.glob("/media/retoo/RetooDisk/wanghui/Data/Food/test/湖南日报/*.jpg"):

            # Parent directory name (used to group outputs) and the file name.
            subdir = os.path.split(os.path.split(path)[0])[1]
            name = os.path.basename(path)
            try:
                src = Image.open(path).convert("RGB")
            except:
                # NOTE(review): bare except silently skips any unreadable image.
                print("error open image %s"%path)
                continue
            # Transform expects (image, target); target is unused at inference.
            data,_ = transform(src, None)
            data = data.unsqueeze(0).to(device)  # add batch dimension

            ## forward
            start = time.time()
            # Despite the name, in eval mode this is a list of per-image
            # detection dicts (boxes / labels / scores / masks), not losses.
            loss_dict = model(data)
            end = time.time()

            det = loss_dict[0]  # single image in the batch
            boxes = det["boxes"].cpu().numpy()
            labels = det["labels"].cpu().numpy()
            scores = det["scores"].cpu().numpy()

            ## filter
            # Keep only very confident detections (stricter than box_score_thresh).
            index = scores > 0.95
            boxes = boxes[index]
            labels = labels[index]
            scores = scores[index]
            if "masks" in det and det["masks"].size(0) != 0:
                # NOTE(review): squeeze() also drops the leading axis when there
                # is exactly one detection, which would break mask[index] — confirm.
                mask = det["masks"].squeeze().cpu().numpy()
                mask = mask[index]
            else:
                mask = []
            save_image = src.copy()
            draw = ImageDraw.Draw(save_image)
            for idx in range(boxes.shape[0]):
                xmin, ymin, xmax, ymax = boxes[idx]
                score = scores[idx]
                label = labels[idx]


                # crop_character = src.crop((xmin, ymin, xmax, ymax))
                # character_save_path = "crop_food/%s/" % subdir
                # os.makedirs(character_save_path, exist_ok=True)
                # crop_character.save("%s/%s_%d.jpg" % (character_save_path, name[:-4],idx))
                draw.rectangle(((xmin, ymin), (xmax, ymax)), fill=None, outline=next(colors), width=20)  ## draw the box outline only (no fill), cycling colours
                draw.text((xmin, ymin - 100),
                              str(round(score, 2)),
                              font=ImageFont.truetype("simhei.ttf", 100),
                              fill=(255, 0, 0))  ## font and colour; the font file must be available at this local path

            print("process image : {}".format(path), "running time :", end - start)
            save_path = "detection_result/%s" % subdir
            os.makedirs(save_path, exist_ok=True)


            plt.imshow(np.asarray(save_image), cmap='gray')  # I would add interpolation='none'
            if len(mask) > 0:
                # Binarise soft masks and overlay them in the red channel.
                mask = np.uint8(mask > 0.5)
                masks = np.stack([mask,np.zeros_like(mask), np.zeros_like(mask)],axis=-1)
                masks = Image.fromarray(masks)
                save_image = Image.blend(save_image, masks, 0.5)
                plt.imshow(np.asarray(masks), cmap='jet', alpha=0.4)  # interpolation='none'
            plt.show()
            # save_image.save("%s/%s" % (save_path, name))
            # Free per-image objects before the next iteration.
            del draw, src,save_image

            # fig, ax = plt.subplots(1)
            # ax.imshow(np.asarray(src))
            # for idx in range(boxes.shape[0]):
            #     xmin, ymin, xmax, ymax = boxes[idx]
            #     score = scores[idx]
            #     label = labels[idx]
            #
            #     if score > 0.5:
            #         rect = patches.Rectangle((xmin, ymin), xmax-xmin, ymax-ymin, linewidth=2, edgecolor='r', facecolor='none')
            #         ax.text(xmin, ymin-10, f"/%{str(round(score,3))}", bbox=dict(facecolor='white', alpha=0.5))
            #         ax.add_patch(rect)
            #
            # plt.show()
            # plt.close()

if __name__ == "__main__":
    import argparse

    # CLI wiring; most training-related options are unused by main(), which
    # only performs inference.
    parser = argparse.ArgumentParser(description=__doc__)

    # Dataset / model selection.
    parser.add_argument('--data-path', default='', help='dataset')
    parser.add_argument('--dataset', default='pascal_voc', help='dataset')
    parser.add_argument('--model', default='maskrcnn_resnet50_fpn',
                        help='model, choice of [fasterrcnn_mobilenetv2_fpn, fasterrcnn_resnet50_fpn]')
    parser.add_argument('--device', default='cuda', help='device')
    # Training hyperparameters (not consumed by the inference-only main()).
    parser.add_argument('-b', '--batch-size', default=2, type=int,
                        help='images per gpu, the total batch size is $NGPU x batch_size')
    parser.add_argument('--epochs', default=10, type=int, metavar='N',
                        help='number of total epochs to run')
    parser.add_argument('-j', '--workers', default=0, type=int, metavar='N',
                        help='number of data loading workers (default: 4)')
    parser.add_argument('--lr', default=0.02, type=float,
                        help='initial learning rate, 0.02 is the default value for training '
                             'on 8 gpus and 2 images_per_gpu')
    parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                        help='momentum')
    parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
                        metavar='W', help='weight decay (default: 1e-4)',
                        dest='weight_decay')
    parser.add_argument('--lr-step-size', default=8, type=int, help='decrease lr every step-size epochs')
    parser.add_argument('--lr-steps', default=[16, 22], nargs='+', type=int, help='decrease lr every step-size epochs')
    parser.add_argument('--lr-gamma', default=0.1, type=float, help='decrease lr by a factor of lr-gamma')
    parser.add_argument('--print-freq', default=20, type=int, help='print frequency')
    parser.add_argument('--output-dir', default='.', help='path where to save')
    parser.add_argument('--resume', default='', help='resume from checkpoint')
    parser.add_argument('--start_epoch', default=0, type=int, help='start epoch')
    parser.add_argument('--aspect-ratio-group-factor', default=-1, type=int)
    parser.add_argument(
        "--test-only",
        dest="test_only",
        help="Only test the model",
        action="store_true",
    )
    # If pretrained is False, only the backbone's pretrained weights are loaded;
    # otherwise the full faster_rcnn_resnet50_fpn detection checkpoint is loaded.
    parser.add_argument(
        "--pretrained",
        default=False,
        dest="pretrained",
        help="Use pre-trained models from the modelzoo",
        action="store_true",
    )

    # distributed training parameters
    parser.add_argument('--world-size', default=1, type=int,
                        help='number of distributed processes')
    parser.add_argument('--dist-url', default='env://', help='url used to set up distributed training')

    args = parser.parse_args()

    if args.output_dir:
        utils.mkdir(args.output_dir)

    main(args)
