import torch
import numpy as np
import time
import os
import csv
import cv2
import argparse
import json

# Adapted from visualize_single_image.py in the RetinaNet codebase; intended for RetinaNet models.
# Can either display the detection results, or generate a JSON annotation file from the model output.

def load_classes(csv_reader):
    """Parse rows of ``class_name,class_id`` into a ``{name: id}`` dict.

    Parameters
    ----------
    csv_reader : iterable of rows
        Typically ``csv.reader(...)``; each row must be a 2-item sequence.

    Returns
    -------
    dict
        Mapping from class name to integer class id.

    Raises
    ------
    ValueError
        If a row does not have exactly two fields, the id is not an
        integer, or a class name appears twice.
    """
    result = {}

    # enumerate from 1 so error messages report human-friendly line numbers
    for line, row in enumerate(csv_reader, 1):
        try:
            class_name, class_id = row
        except ValueError:
            raise ValueError("line {}: format should be 'class_name,class_id'".format(line))
        class_id = int(class_id)

        if class_name in result:
            raise ValueError("line {}: duplicate class name: '{}'".format(line, class_name))
        result[class_name] = class_id
    return result


def draw_caption(image, box, caption):
    """Render *caption* just inside the top-left corner of *box* on *image*.

    The text is drawn twice — a thick dark pass under a thin light pass —
    to produce an outlined label that stays readable on any background.
    """
    coords = np.array(box).astype(int)
    anchor = (coords[0], coords[1] + 13)
    cv2.putText(image, caption, anchor, cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 0), 2)
    cv2.putText(image, caption, anchor, cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)


def detect_image(image_path, model_path, class_list, save_json=False):
    """Run a RetinaNet model over every image in a directory.

    Parameters
    ----------
    image_path : str
        Directory containing the input images.
    model_path : str
        Path to a serialized (``torch.save``'d) RetinaNet model.
    class_list : str
        CSV file with one ``class_name,class_id`` pair per line.
    save_json : bool
        When True, write COCO-style annotations to
        ``instances_train2014.json``; when False, display each image's
        detections in an OpenCV window instead.
    """
    with open(class_list, 'r') as f:
        classes = load_classes(csv.reader(f, delimiter=','))

    # Invert {name: id} into {id: name} for labeling detections.
    labels = {class_id: name for name, class_id in classes.items()}

    # Load on CPU first so a GPU-saved checkpoint also works on CPU-only hosts.
    model = torch.load(model_path, map_location='cpu')

    if torch.cuda.is_available():
        model = model.cuda()

    model.training = False
    model.eval()

    box_id = 0
    img_id = 20200000  # arbitrary base so image ids don't collide with other sets
    annotations_list = []
    images_list = []
    categories_list = [{"supercategory": "none", "id": 0, "name": "plane"},
                       {"supercategory": "none", "id": 1, "name": "head"},
                       {"supercategory": "none", "id": 2, "name": "flame"}]
    for img_name in os.listdir(image_path):
        image = cv2.imread(os.path.join(image_path, img_name))
        if image is None:
            # Skip files cv2 cannot decode (non-images, corrupt files).
            continue

        image_orig = image.copy()
        rows, cols, cns = image.shape

        img_id += 1
        images_list.append({
            "id": img_id,
            "file_name": img_name,
            "width": cols,
            "height": rows
        })

        # True: resize keeping the aspect ratio; False: resize to a fixed square.
        rate_resize = True  # True False

        if rate_resize:  # keep the aspect ratio
            smallest_side = min(rows, cols)

            # Rescale so the smallest side becomes min_side, capping the
            # largest side at max_side for extreme aspect ratios.
            min_side = 608
            max_side = 1024
            scale = min_side / smallest_side

            largest_side = max(rows, cols)
            if largest_side * scale > max_side:
                scale = max_side / largest_side

            # resize the image with the computed scale
            image = cv2.resize(image, (int(round(cols * scale)), int(round(rows * scale))))
            rows, cols, cns = image.shape

            # Pad bottom/right so both sides are multiples of 32 — presumably
            # required by the backbone's feature-pyramid strides.
            pad_bottom = (32 - rows % 32) % 32
            pad_right = (32 - cols % 32) % 32

            new_image = np.zeros((rows + pad_bottom, cols + pad_right, cns), dtype=np.float32)
            new_image[:rows, :cols, :] = image.astype(np.float32)
            image = new_image

        else:  # resize both sides to a fixed size
            input_size = 608
            # Bugfix: the original zeroed the unrelated `pad_w` instead of
            # `input_pad`, silently inflating input_size from 608 to 640.
            input_pad = (32 - input_size % 32) % 32
            input_size += input_pad
            scale_w = cols / input_size
            scale_h = rows / input_size

            image = cv2.resize(image, (input_size, input_size))
            image = image.astype(np.float32)

        # Normalize with ImageNet statistics, then NHWC -> NCHW with batch dim.
        image /= 255
        image -= [0.485, 0.456, 0.406]
        image /= [0.229, 0.224, 0.225]
        image = np.expand_dims(image, 0)
        image = np.transpose(image, (0, 3, 1, 2))

        with torch.no_grad():
            image = torch.from_numpy(image)
            if torch.cuda.is_available():
                image = image.cuda()

            st = time.time()
            # Bugfix: the tensor was already moved to the GPU above when one is
            # available; the unconditional `.cuda()` here crashed on CPU-only hosts.
            scores, classification, transformed_anchors = model(image.float())

            print('Elapsed time: {}'.format(time.time() - st))
            idxs = np.where(scores.cpu() > 0.5)

            for j in range(idxs[0].shape[0]):
                det_idx = idxs[0][j]
                bbox = transformed_anchors[det_idx, :]

                # Map the box back into original-image coordinates.
                if rate_resize:  # aspect-ratio resize: single uniform scale
                    x1 = int(bbox[0] / scale)
                    y1 = int(bbox[1] / scale)
                    x2 = int(bbox[2] / scale)
                    y2 = int(bbox[3] / scale)
                else:  # fixed-size resize: per-axis scales
                    x1 = int(bbox[0] * scale_w)
                    y1 = int(bbox[1] * scale_h)
                    x2 = int(bbox[2] * scale_w)
                    y2 = int(bbox[3] * scale_h)

                label_name = labels[int(classification[det_idx])]
                print(bbox, classification.shape)
                # Bugfix: index scores with the detection index, not the loop
                # counter — `scores[j]` reported the wrong confidence.
                score = scores[det_idx]
                caption = '{} {:.3f}'.format(label_name, score)

                draw_caption(image_orig, (x1, y1, x2, y2), caption)
                cv2.rectangle(image_orig, (x1, y1), (x2, y2), color=(0, 0, 255), thickness=2)

                width = int(x2 - x1)
                height = int(y2 - y1)
                box_id += 1
                annotations_list.append({
                    "id": box_id,              # unique id of this bbox
                    "image_id": img_id,        # id of the owning image
                    "category_id": classes[label_name],
                    # Bugfix: trace the rectangle perimeter in order; the
                    # original vertex order produced a self-crossing polygon.
                    "segmentation": [[x1, y1, x2, y1, x2, y2, x1, y2]],
                    "area": width * height,    # area of the segmentation
                    "bbox": [x1, y1, width, height],
                    "iscrowd": 0,              # 0 - no overlapping segments
                    "ignore": 0
                })

            if not save_json:
                cv2.imshow('detections', image_orig)
                cv2.waitKey(0)

    results = {
        "images": images_list,
        "annotations": annotations_list,
        "categories": categories_list
    }
    if save_json:
        # `with` ensures the handle is closed even if dumping fails.
        with open('instances_train2014.json', 'w') as out_file:
            json.dump(results, out_file)  # pass indent=4 for readable output



if __name__ == '__main__':
    # argparse was imported but unused; expose the previously hard-coded paths
    # as CLI options whose defaults preserve the original behavior exactly.
    parser = argparse.ArgumentParser(
        description='Run RetinaNet detection over a directory of images.')
    parser.add_argument('--image_dir',
                        default='/home/solan/cxk/all_sar_plane/images/train2014',
                        help='directory containing the input images')
    parser.add_argument('--model',
                        default='./retinanet_true.pt',
                        help="path to the serialized model (e.g. './model/retinanet_18.pt')")
    parser.add_argument('--class_list',
                        default='./class.txt',
                        help='CSV file mapping class_name,class_id')
    parser.add_argument('--show', action='store_true',
                        help='display detections instead of writing the JSON file')
    args = parser.parse_args()

    detect_image(args.image_dir, args.model, args.class_list,
                 save_json=not args.show)