import os
import time
import json
import glob
import tqdm

import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import torch
from torchvision import transforms

from network_files import MaskRCNN
from backbone import resnet50_fpn_backbone,resnet18_fpn_backbone
from draw_box_utils import draw_objs


def create_model(num_classes, fpn_backbone='r18', box_thresh=0.5):
    """Build a Mask R-CNN model with a ResNet-FPN backbone.

    Args:
        num_classes (int): number of classes INCLUDING background.
        fpn_backbone (str): 'r18' for ResNet-18 FPN, 'r50' for ResNet-50 FPN.
        box_thresh (float): score threshold applied to both RPN proposals and
            final box predictions.

    Returns:
        MaskRCNN: the constructed (untrained) model.

    Raises:
        ValueError: if ``fpn_backbone`` is not 'r18' or 'r50'.
    """
    if fpn_backbone == 'r18':
        backbone = resnet18_fpn_backbone()
    elif fpn_backbone == 'r50':
        backbone = resnet50_fpn_backbone()
    else:
        # Fail fast with a clear message instead of the confusing NameError
        # the original code produced when `backbone` was never assigned.
        raise ValueError(
            "unsupported fpn_backbone: {!r} (expected 'r18' or 'r50')".format(fpn_backbone))
    model = MaskRCNN(backbone,
                     num_classes=num_classes,
                     rpn_score_thresh=box_thresh,
                     box_score_thresh=box_thresh)

    return model


def time_synchronized():
    """Return the current wall-clock time, synchronizing CUDA first.

    Without the synchronize, asynchronously queued GPU kernels would not be
    included in timings taken around a model call.
    """
    if torch.cuda.is_available():
        # Block until all queued CUDA work is done before reading the clock.
        torch.cuda.synchronize()
    return time.time()


def main(args):
    """Run Mask R-CNN inference on one image or a directory of images.

    For each input image the script prints the inference time, shows the
    visualization with matplotlib, and saves it under ``args.output``.

    Expected attributes on ``args``: input, output, weights_path, backbone,
    num_classes (background excluded), box_thresh, label_json_path — see the
    argparse definition at the bottom of this file.
    """
    # get device
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("using {} device.".format(device))
    print("using model: {}".format(args.weights_path))

    # create model; +1 adds the background class
    model = create_model(num_classes=args.num_classes + 1,
                         fpn_backbone=args.backbone,
                         box_thresh=args.box_thresh)

    # load trained weights (some checkpoints wrap the state dict under "model")
    assert os.path.exists(args.weights_path), "{} file does not exist.".format(args.weights_path)
    weights_dict = torch.load(args.weights_path, map_location='cpu')
    weights_dict = weights_dict["model"] if "model" in weights_dict else weights_dict
    model.load_state_dict(weights_dict)
    model.to(device)
    model.eval()  # evaluation mode: set once, not once per image

    # read the class-index -> class-name mapping
    assert os.path.exists(args.label_json_path), "json file {} does not exist.".format(args.label_json_path)
    with open(args.label_json_path, 'r') as json_file:
        category_index = json.load(json_file)

    if not args.input:
        return

    # make sure the output directory exists before writing results
    if not os.path.exists(args.output):
        os.makedirs(args.output)

    # the input may be a single image file or a directory of images
    if os.path.isdir(args.input):
        input_isdir = True
        inputs = [os.path.join(args.input, fname) for fname in os.listdir(args.input)]
    else:
        assert os.path.exists(args.input), f"{args.input} does not exist."
        input_isdir = False
        inputs = [args.input]

    # PIL image -> tensor; no normalization here (hoisted out of the loop —
    # the transform is stateless and identical for every image)
    data_transform = transforms.Compose([transforms.ToTensor()])

    warmed_up = False
    with torch.no_grad():
        for img_path in tqdm.tqdm(inputs):
            original_img = Image.open(img_path).convert('RGB')
            img = data_transform(original_img)
            img = torch.unsqueeze(img, dim=0)  # expand batch dimension

            if not warmed_up:
                # One dummy forward pass so CUDA/cuDNN initialization does not
                # pollute the timing of the first real inference. The original
                # code repeated this for every image, doubling the work.
                img_height, img_width = img.shape[-2:]
                init_img = torch.zeros((1, 3, img_height, img_width), device=device)
                model(init_img)
                warmed_up = True

            t_start = time_synchronized()
            predictions = model(img.to(device))[0]
            t_end = time_synchronized()
            print("inference+NMS time: {}".format(t_end - t_start))

            predict_boxes = predictions["boxes"].to("cpu").numpy()
            predict_classes = predictions["labels"].to("cpu").numpy()
            predict_scores = predictions["scores"].to("cpu").numpy()
            predict_mask = predictions["masks"].to("cpu").numpy()
            predict_mask = np.squeeze(predict_mask, axis=1)  # [N, 1, h, w] -> [N, h, w]

            if len(predict_boxes) == 0:
                # No detections: report it but still draw/save the (empty) result,
                # matching the original behavior (its early return was commented out).
                print("没有检测到任何目标!")

            plot_img = draw_objs(original_img,
                                 boxes=predict_boxes,
                                 classes=predict_classes,
                                 scores=predict_scores,
                                 masks=predict_mask,
                                 category_index=category_index,
                                 line_thickness=3,
                                 font='arial.ttf',
                                 font_size=20)
            plt.imshow(plot_img)
            plt.show()

            # save the prediction visualization
            if input_isdir:
                # os.path functions are portable (the original '/'-split broke on
                # Windows) and splitext handles extensions of any length (the
                # original [:-3] corrupted names ending in .jpeg, .tiff, ...).
                out_name = os.path.splitext(os.path.basename(img_path))[0] + ".jpg"
                plot_img.save(os.path.join(args.output, out_name))
            else:
                plot_img.save(os.path.join(args.output, "test_road.jpg"))


if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(
        description=__doc__)

    # input image file or a directory of images
    parser.add_argument('--input', type=str, help='Input image file or folder path.')
    # directory where the prediction visualizations are saved
    parser.add_argument('--output', default="./outputs", type=str, help='A file or directory to save output visualizations.')
    # path to the trained checkpoint to run inference with
    parser.add_argument('--weights-path', default='./multi_train_r18/model_35.pth', help='weights path')
    # backbone type: 'r18' or 'r50' (see create_model)
    parser.add_argument('--backbone', default='r18', help='backbone type')
    # number of object classes (background NOT included; main() adds +1)
    parser.add_argument('--num-classes', default=6, type=int, help='num_classes')
    # minimum score for a detection to be kept/shown
    parser.add_argument("--box-thresh", type=float, default=0.3, help="Minimum score for instance predictions to be shown")
    # JSON mapping from class index to class name
    # NOTE(review): the default points at a 91-class COCO index file while
    # --num-classes defaults to 6 — confirm the intended label file is passed.
    parser.add_argument('--label-json-path', type=str, default="coco91_indices.json")
    args = parser.parse_args()
    print(args)

    main(args)

