import os
import sys
import argparse
from PIL import Image

import acl
import cv2 as cv
import numpy as np
from tqdm import tqdm

from acllite_model import AclLiteModel
from acllite_resource import AclLiteResource

from metric import PascalVOCDetectionEvaluator

# Custom dataset's labels
dataset_name = "Jiangxiang-VOC-test"
labels = ["hole", "crack"]
# One drawing color per label (order matches `labels`).
colors = [(255, 0, 0), (0, 255, 0)]

# Defaults for CLI-configurable settings; most are reassigned from argparse
# in the __main__ block at the bottom of the file.
mode = "metric"
split = "test"
DATA_DIR = "datasets/VOC2007/"
TEST_TXT = "datasets/VOC2007/ImageSets/Main/test.txt"
INPUT_DIR = "datasets/VOC2007/JPEGImages/"
OUTPUT_DIR = "output"
MODEL_PATH = "fsdet_model.om"
MODEL_WIDTH = 800
MODEL_HEIGHT = 800

# Placeholder thresholds; real values come from --iou_thres / --cf_thres.
iou_threshold = 0.
confidence_threshold = 0.

class_num = len(labels)

def preprocess(img_path):
    """Letterbox-resize an image to (MODEL_HEIGHT, MODEL_WIDTH) for the model.

    The image is scaled with its aspect ratio preserved, centered on a black
    canvas, and returned as a float32 CHW array together with the PIL image
    (whose .size is used later by post_process to undo the letterboxing).

    Args:
        img_path: path to the input image file.

    Returns:
        (new_image, image): float32 array of shape (3, MODEL_HEIGHT,
        MODEL_WIDTH) and the opened PIL image.
    """
    image = Image.open(img_path)
    # Force 3 channels: a grayscale ('L') or RGBA input would otherwise make
    # np.array(image_) shape-incompatible with the (h, w, 3) canvas below.
    if image.mode != "RGB":
        image = image.convert("RGB")
    img_w, img_h = image.size
    net_h = MODEL_HEIGHT
    net_w = MODEL_WIDTH

    # Uniform scale that fits the whole image inside the network input.
    scale = min(float(net_w) / float(img_w), float(net_h) / float(img_h))
    new_w = int(img_w * scale)
    new_h = int(img_h * scale)

    # Integer offsets that center the resized image on the canvas.
    shift_x = (net_w - new_w) // 2
    shift_y = (net_h - new_h) // 2

    image_ = image.resize((new_w, new_h))
    new_image = np.zeros((net_h, net_w, 3), np.uint8)
    new_image[shift_y: new_h + shift_y, shift_x: new_w + shift_x, :] = np.array(image_)
    new_image = new_image.astype(np.float32)
    # HWC -> CHW, contiguous copy for the model input.
    new_image = new_image.transpose(2, 0, 1).copy()
    return new_image, image

def overlap(x1, x2, x3, x4):
    """Signed length of the intersection of intervals [x1, x2] and [x3, x4].

    Zero or negative when the intervals do not overlap.
    """
    return min(x2, x4) - max(x1, x3)

def cal_iou(box, truth):
    """Intersection-over-union of two boxes given as (x1, y1, x2, y2)."""
    inter_w = min(box[2], truth[2]) - max(box[0], truth[0])
    inter_h = min(box[3], truth[3]) - max(box[1], truth[1])
    if inter_w <= 0 or inter_h <= 0:
        return 0
    inter_area = inter_w * inter_h
    area_box = (box[2] - box[0]) * (box[3] - box[1])
    area_truth = (truth[2] - truth[0]) * (truth[3] - truth[1])
    return inter_area * 1.0 / (area_box + area_truth - inter_area)

def apply_nms(boxes, scores, thres):
    """Greedy class-agnostic non-maximum suppression.

    Args:
        boxes: array of shape (N, 4), boxes as (x1, y1, x2, y2).
        scores: array of shape (N,), one confidence per box.
        thres: IoU threshold; a box overlapping a kept higher-scoring box
            by MORE than this value is suppressed.

    Returns:
        List of kept indices into ``boxes``, in descending score order.
    """
    # Indices sorted by ascending score; the best candidate is idx[-1].
    idx = scores.argsort()
    keep = []

    while len(idx) > 0:
        max_score_idx = idx[-1]
        max_score_box = boxes[max_score_idx]
        keep.append(max_score_idx)
        if len(idx) == 1:
            break
        idx = idx[:-1]

        # Retain only the remaining candidates whose IoU with the winner
        # is at or below the threshold.
        below_thres_idx = []
        other_boxes = boxes[idx]
        for i, box in enumerate(other_boxes):
            if cal_iou(max_score_box, box) <= thres:
                below_thres_idx.append(i)
        idx = idx[below_thres_idx]
    return keep

"""
def _sigmoid(x):
    return 1.0 / (1 + np.exp(-x))

def decode_bbox(conv_output, anchors, img_w, img_h, x_scale, y_scale, shift_x_ratio, shift_y_ratio):
    print('conv_output.shape', conv_output.shape)
    _, _, h, w = conv_output.shape
    conv_output = conv_output.transpose(0, 2, 3, 1)
    pred = conv_output.reshape((h * w, 3, 5 + class_num))
    pred[..., 4:] = _sigmoid(pred[..., 4:])
    pred[..., 0] = (_sigmoid(pred[..., 0]) + np.tile(range(w), (3, h)).transpose((1, 0))) / w
    pred[..., 1] = (_sigmoid(pred[..., 1]) + np.tile(np.repeat(range(h), w), (3, 1)).transpose((1, 0))) / h
    pred[..., 2] = np.exp(pred[..., 2]) * anchors[:, 0:1].transpose((1, 0)) / w
    pred[..., 3] = np.exp(pred[..., 3]) * anchors[:, 1:2].transpose((1, 0)) / h

    bbox = np.zeros((h * w, 3, 4))
    bbox[..., 0] = np.maximum((pred[..., 0] - pred[..., 2] / 2.0 - shift_x_ratio) * x_scale * img_w, 0)  # x_min
    bbox[..., 1] = np.maximum((pred[..., 1] - pred[..., 3] / 2.0 - shift_y_ratio) * y_scale * img_h, 0)  # y_min
    bbox[..., 2] = np.minimum((pred[..., 0] + pred[..., 2] / 2.0 - shift_x_ratio) * x_scale * img_w, img_w)  # x_max
    bbox[..., 3] = np.minimum((pred[..., 1] + pred[..., 3] / 2.0 - shift_y_ratio) * y_scale * img_h, img_h)  # y_max
    pred[..., :4] = bbox
    pred = pred.reshape((-1, 5 + class_num))
    pred[:, 4] = pred[:, 4] * pred[:, 5:].max(1)
    pred[:, 5] = np.argmax(pred[:, 5:], axis=-1)
    pred = pred[pred[:, 4] >= 0.2]
    print('pred[:, 5]', pred[:, 5])
    print('pred[:, 5] shape', pred[:, 5].shape)

    all_boxes = [[] for ix in range(class_num)]
    for ix in range(pred.shape[0]):
        box = [int(pred[ix, iy]) for iy in range(4)]
        box.append(int(pred[ix, 5]))
        box.append(pred[ix, 4])
        all_boxes[box[4] - 1].append(box)
    return all_boxes

def convert_labels(label_list):
    if isinstance(label_list, np.ndarray):
        label_list = label_list.tolist()
        label_names = [labels[int(index)] for index in label_list]
    return label_names
"""

def post_process(infer_output, origin_img):
    """Convert raw model outputs to detections in original-image coordinates.

    Undoes the letterbox transform applied by preprocess, filters detections
    by confidence, then applies class-agnostic NMS.

    Args:
        infer_output: model outputs as [scores (N,), boxes (N, 4), classes (N,)].
            boxes are modified in place during coordinate un-mapping.
        origin_img: the original PIL image (only .size is read).

    Returns:
        Dict with 'detection_classes', 'detection_boxes' and
        'detection_scores' (empty lists when nothing survives filtering).
    """
    result_return = dict()
    img_h = origin_img.size[1]
    img_w = origin_img.size[0]
    # Recompute the letterbox scale/offsets used during preprocessing.
    # Use integer // 2 to exactly mirror the integer shift applied in
    # preprocess (the old / 2 was off by half a pixel for odd padding).
    scale = min(float(MODEL_WIDTH) / float(img_w), float(MODEL_HEIGHT) / float(img_h))
    new_w = int(img_w * scale)
    new_h = int(img_h * scale)
    shift_x = (MODEL_WIDTH - new_w) // 2
    shift_y = (MODEL_HEIGHT - new_h) // 2

    # Map box coordinates from network-input space back to the original image.
    infer_output[1][:, [0, 2]] = (infer_output[1][:, [0, 2]] - shift_x) / scale
    infer_output[1][:, [1, 3]] = (infer_output[1][:, [1, 3]] - shift_y) / scale

    scores = infer_output[0]
    boxes = infer_output[1]
    classes = infer_output[2]

    # Drop low-confidence detections before NMS.
    keep = scores > confidence_threshold
    scores = scores[keep]
    boxes = boxes[keep]
    classes = classes[keep]

    keep = apply_nms(boxes, scores, iou_threshold)

    if not keep:
        result_return['detection_classes'] = []
        result_return['detection_boxes'] = []
        result_return['detection_scores'] = []
    else:
        result_return['detection_classes'] = classes[keep]
        result_return['detection_boxes'] = boxes[keep]
        result_return['detection_scores'] = scores[keep]
    return result_return

"""
def preprocess_frame(bgr_img):
    bgr_img = bgr_img[:, :, ::-1]
    image = bgr_img
    image = LaneFinder.Image.fromarray(image.astype('uint8'), 'RGB')

    fframe = np.array(image)
    fframe = lf.process_image(fframe, False)
    frame = LaneFinder.Image.fromarray(fframe)
    framecv = cv.cvtColor(np.asarray(frame), cv.COLOR_RGB2BGR)
    return framecv

def calculate_position(bbox, transform_matrix, warped_size, pix_per_meter):
    if len(bbox) == 0:
        print('Nothing')
    else:
        point = np.array((bbox[1] / 2 + bbox[3] / 2, bbox[2])).reshape(1, 1, -1)
        pos = cv.perspectiveTransform(point, transform_matrix).reshape(-1, 1)
        return np.array((warped_size[1] - pos[1]) / pix_per_meter[1])
"""

def main():
    """Run inference over every image listed in TEST_TXT.

    In "metric" mode only detection results are accumulated and evaluated;
    in "predict" mode annotated images are additionally written to OUTPUT_DIR.
    """
    if not os.path.exists(OUTPUT_DIR):
        os.mkdir(OUTPUT_DIR)

    # ACL resource initialization
    acl_resource = AclLiteResource()
    acl_resource.init()

    # load model
    model = AclLiteModel(MODEL_PATH)

    # Build image paths from the id-list file. strip() is robust to a missing
    # trailing newline on the last line (the old [:-1] slice was not) and
    # blank lines are skipped.
    images_list = [os.path.join(INPUT_DIR, img_id.strip() + ".jpg")
                   for img_id in open(TEST_TXT, "r") if img_id.strip()]

    print("IOU Threshold: ", iou_threshold)
    print("CF Threshold: ", confidence_threshold)

    import time
    time_list = []
    result_dict = {}
    img_id_list = []
    # Read images from the data directory one by one for inference.
    for pic in tqdm(images_list, desc="Inferencing"):
        img_id = os.path.splitext(os.path.basename(pic))[0]
        img_id_list.append(img_id)

        # preprocess
        data, orig = preprocess(pic)

        # Send into model inference (timed).
        start_time = time.time()
        result_list = model.execute([data, ])
        time_list.append(time.time() - start_time)

        # Process predicted results.
        result_return = post_process(result_list, orig)
        result_dict[img_id] = result_return

        if mode == "metric":
            # Metric mode only accumulates results; skip drawing entirely
            # (the BGR image is not even read in this mode).
            continue

        # Draw boxes/labels on the original BGR image.
        datacv = cv.imread(pic)
        for i in range(len(result_return["detection_boxes"])):
            label_id = result_return["detection_classes"][i]
            label = labels[label_id]
            box = result_return["detection_boxes"][i]
            cv.rectangle(datacv, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])),
                         colors[label_id % 2], thickness=1)
            # Keep the label text inside the image near the box corner.
            p3 = (max(int(box[0]), 15), max(int(box[1]), 15))
            cv.putText(datacv, label, p3, cv.FONT_HERSHEY_COMPLEX, 0.6, colors[label_id % 2], 1)

        output_file = os.path.join(OUTPUT_DIR, "out_" + os.path.basename(pic))
        cv.imwrite(output_file, datacv)

    # Guard against an empty image list (avoids ZeroDivisionError).
    if time_list:
        print("Average Inference Time: {} s/img".format(sum(time_list) / len(time_list)))

    evaluator = PascalVOCDetectionEvaluator(dataset_name, labels, DATA_DIR, split)
    evaluator.reset()
    evaluator.process(img_id_list, result_dict)
    evaluator.evaluate()

    print("Execute end")


        #for i in range(len(result_return['detection_classes'])):
        #    box = result_return['detection_boxes'][i]
        #    class_name = result_return['detection_classes'][i]
        #    confidence = result_return['detection_scores'][i]
        #    distance[i] = calculate_position(bbox=box, transform_matrix=perspective_transform,
        #                warped_size=WARPED_SIZE, pix_per_meter=pixels_per_meter)
        #    label_dis = '{} {:.2f}m'.format('dis:', distance[i][0])
        #    cv.putText(frame_with_lane, label_dis, (int(box[1]) + 10, int(box[2]) + 15),
        #                cv.FONT_ITALIC, 0.6, colors[i % 6], 1)

        #    cv.rectangle(frame_with_lane, (int(box[1]), int(box[0])), (int(box[3]), int(box[2])), colors[i % 6])
        #    p3 = (max(int(box[1]), 15), max(int(box[0]), 15))
        #    out_label = class_name
        #    cv.putText(frame_with_lane, out_label, p3, cv.FONT_ITALIC, 0.6, colors[i % 6], 1)

        #output_file = os.path.join(OUTPUT_DIR, "out_" + os.path.basename(pic))
        #print("output:%s" % output_file)
        #cv.imwrite(output_file, frame_with_lane)
    #print("Execute end")

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Run inference of Fsdet om model')
    parser.add_argument("--model", type=str, default="./fsdet_model.om", help="om model path")
    parser.add_argument("--height", type=int, default=800)
    parser.add_argument("--width", type=int, default=800)
    parser.add_argument("--mode", type=str, default="metric", choices=["metric", "predict"])
    parser.add_argument("--txt", type=str, default="datasets/VOC2007/ImageSets/Main/test.txt",
                        help="the image id list txt file")
    parser.add_argument("--split", type=str, default="test")
    parser.add_argument("--data_dir", type=str, default="datasets/VOC2007/")
    parser.add_argument("--cf_thres", type=float, default=0.05)
    parser.add_argument("--iou_thres", type=float, default=0.5)
    args = parser.parse_args()

    mode = args.mode
    split = args.split
    MODEL_PATH = args.model
    MODEL_HEIGHT = args.height
    MODEL_WIDTH = args.width
    DATA_DIR = args.data_dir
    # BUG FIX: --txt used to be stored in an unused TXT_PATH variable while
    # main() reads TEST_TXT, and INPUT_DIR was never derived from --data_dir,
    # so both flags were silently ignored.
    TEST_TXT = args.txt
    INPUT_DIR = os.path.join(DATA_DIR, "JPEGImages")

    iou_threshold = args.iou_thres
    confidence_threshold = args.cf_thres

    import gc
    main()
    gc.collect()