from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os, sys
# add python path of PaddleDetection to sys.path
parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 2)))
if parent_path not in sys.path:
    sys.path.append(parent_path)
import xml.etree.ElementTree as ET
from xml.etree.ElementTree import Element
from xml.etree.ElementTree import ElementTree
from xml.etree.ElementTree import SubElement
import glob
import numpy as np
import six
from PIL import Image, ImageOps
import numpy as np
from PIL import Image, ImageDraw,ImageFont
from scipy import ndimage
import cv2

import paddle
from paddle import fluid

from ppdet.core.workspace import load_config, merge_config, create

from ppdet.utils.eval_utils import parse_fetches
from ppdet.utils.cli import ArgsParser
from ppdet.utils.check import check_gpu, check_version, check_config, enable_static_mode
from ppdet.utils.visualizer import visualize_results
import ppdet.utils.checkpoint as checkpoint

from ppdet.data.reader import create_reader

import logging
FORMAT = '%(asctime)s-%(levelname)s: %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
logger = logging.getLogger(__name__)


def get_save_image_name(output_dir, image_path):
    """Build the output path for a saved image.

    Keeps the source file's base name and extension and places it under
    ``output_dir``, creating that directory on demand.
    """
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    base_name = os.path.split(image_path)[-1]
    stem, suffix = os.path.splitext(base_name)
    return os.path.join(output_dir, stem) + suffix


def get_test_images(infer_dir, infer_img):
    """Collect the image paths to run inference on (TEST mode).

    ``infer_img`` (a single file) takes priority over ``infer_dir``;
    otherwise every jpg/jpeg/png/bmp file (either letter case) directly
    inside ``infer_dir`` is returned.
    """
    assert infer_img is not None or infer_dir is not None, \
        "--infer_img or --infer_dir should be set"
    assert infer_img is None or os.path.isfile(infer_img), \
            "{} is not a file".format(infer_img)
    assert infer_dir is None or os.path.isdir(infer_dir), \
            "{} is not a directory".format(infer_dir)

    # A concrete image file wins over the directory scan.
    if infer_img and os.path.isfile(infer_img):
        return [infer_img]

    infer_dir = os.path.abspath(infer_dir)
    assert os.path.isdir(infer_dir), \
        "infer_dir {} is not a directory".format(infer_dir)

    lower_exts = ['jpg', 'jpeg', 'png', 'bmp']
    found = set()
    # A set deduplicates paths matched by more than one pattern.
    for ext in lower_exts + [e.upper() for e in lower_exts]:
        found.update(glob.glob('{}/*.{}'.format(infer_dir, ext)))
    images = list(found)

    assert len(images) > 0, "no image found in {}".format(infer_dir)
    logger.info("Found {} inference images in total.".format(len(images)))

    return images


def main():
    """
    Run static-graph inference over the configured test images and, for
    each image, compare predictions against VOC ground truth via
    ``findMistake`` (which saves mismatched samples under ``wrongSample/``).

    Relies on the module-level ``FLAGS`` namespace parsed in the
    ``__main__`` block (config path, infer_dir/infer_img, draw threshold,
    VisualDL switches).
    """
    cfg = load_config(FLAGS.config)

    merge_config(FLAGS.opt)
    check_config(cfg)
    # check if set use_gpu=True in paddlepaddle cpu version
    check_gpu(cfg.use_gpu)
    # check if paddlepaddle version is satisfied
    check_version()

    main_arch = cfg.architecture

    dataset = cfg.TestReader['dataset']

    # Point the test dataset at the images selected on the command line.
    test_images = get_test_images(FLAGS.infer_dir, FLAGS.infer_img)
    dataset.set_images(test_images)

    place = fluid.CUDAPlace(0) if cfg.use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)

    model = create(main_arch)

    # Build the inference program (inputs + test-mode fetches), then clone
    # it for-test so training-only ops are stripped.
    startup_prog = fluid.Program()
    infer_prog = fluid.Program()
    with fluid.program_guard(infer_prog, startup_prog):
        with fluid.unique_name.guard():
            inputs_def = cfg['TestReader']['inputs_def']
            inputs_def['iterable'] = True
            feed_vars, loader = model.build_inputs(**inputs_def)
            test_fetches = model.test(feed_vars)
    infer_prog = infer_prog.clone(True)

    reader = create_reader(cfg.TestReader, devices_num=1)
    loader.set_sample_list_generator(reader, place)

    exe.run(startup_prog)
    if cfg.weights:
        checkpoint.load_params(exe, infer_prog, cfg.weights)

    # parse infer fetches: metric decides which extra tensors are fetched
    assert cfg.metric in ['COCO', 'VOC', 'OID', 'WIDERFACE'], \
            "unknown metric type {}".format(cfg.metric)
    extra_keys = []
    if cfg['metric'] in ['COCO', 'OID']:
        extra_keys = ['im_info', 'im_id', 'im_shape']
    if cfg['metric'] == 'VOC' or cfg['metric'] == 'WIDERFACE':
        extra_keys = ['im_id', 'im_shape']
    keys, values, _ = parse_fetches(test_fetches, infer_prog, extra_keys)

    # parse dataset category: pick the metric-specific result converters
    if cfg.metric == 'COCO':
        from ppdet.utils.coco_eval import bbox2out, mask2out, segm2out, get_category_info
    if cfg.metric == 'OID':
        from ppdet.utils.oid_eval import bbox2out, get_category_info
    if cfg.metric == "VOC":
        from ppdet.utils.voc_eval import bbox2out, get_category_info
    if cfg.metric == "WIDERFACE":
        from ppdet.utils.widerface_eval_utils import bbox2out, lmk2out, get_category_info

    anno_file = dataset.get_anno()
    with_background = dataset.with_background
    use_default_label = dataset.use_default_label

    clsid2catid, catid2name = get_category_info(anno_file, with_background,
                                                use_default_label)

    # whether output bbox is normalized in model output layer
    is_bbox_normalized = False
    if hasattr(model, 'is_bbox_normalized') and \
            callable(model.is_bbox_normalized):
        is_bbox_normalized = model.is_bbox_normalized()

    # use VisualDL to log image
    if FLAGS.use_vdl:
        assert six.PY3, "VisualDL requires Python >= 3.5"
        from visualdl import LogWriter
        vdl_writer = LogWriter(FLAGS.vdl_log_dir)
        # NOTE(review): neither counter is advanced inside the loop below,
        # so every image is logged at step 0 / frame 0 — confirm intended.
        vdl_image_step = 0
        vdl_image_frame = 0  # each frame can display ten pictures at most.

    imid2path = dataset.get_imid2path()
    for iter_id, data in enumerate(loader()):
        outs = exe.run(infer_prog,
                       feed=data,
                       fetch_list=values,
                       return_numpy=False)
        # Map fetched LoDTensors to (ndarray, lod) pairs keyed by fetch name.
        res = {
            k: (np.array(v), v.recursive_sequence_lengths())
            for k, v in zip(keys, outs)
        }
        logger.info('Infer iter {}'.format(iter_id))
        if 'TTFNet' in cfg.architecture:
            res['bbox'][1].append([len(res['bbox'][0])])
        if 'CornerNet' in cfg.architecture:
            from ppdet.utils.post_process import corner_post_process
            post_config = getattr(cfg, 'PostProcess', None)
            corner_post_process(res, post_config, cfg.num_classes)

        bbox_results = None
        mask_results = None
        segm_results = None
        lmk_results = None
        bbox_results = bbox2out([res], clsid2catid, is_bbox_normalized)

        # visualize result
        im_ids = res['im_id'][0]
        for im_id in im_ids:
            image_path = imid2path[int(im_id)]
            image = Image.open(image_path).convert('RGB')
            # Respect EXIF orientation so drawn boxes line up with pixels.
            image = ImageOps.exif_transpose(image)

            # use VisualDL to log original image
            if FLAGS.use_vdl:
                original_image_np = np.array(image)
                vdl_writer.add_image(
                    "original/frame_{}".format(vdl_image_frame),
                    original_image_np, vdl_image_step)

            # Compare this image's predictions with its VOC annotation and
            # dump a visualization if they disagree.
            findMistake(image_path,
                        int(im_id), catid2name,
                                      FLAGS.draw_threshold, bbox_results)

         

def getMessageFromVoc(vocPath):
    """Parse a Pascal-VOC annotation XML file.

    Returns ``[[filename, height, width],
    [[label, xmin, ymin, xmax, ymax], ...]]`` with the numeric fields as
    floats, or ``[[], []]`` when ``vocPath`` is not an existing file.
    """
    if not os.path.isfile(vocPath):
        return [[], []]

    root = ET.parse(vocPath).getroot()

    size = root.find("size")
    image_info = [
        root.find("filename").text,
        float(size.findtext("height")),
        float(size.findtext("width")),
    ]

    boxes = []
    for obj in root.findall("object"):
        bndbox = obj.find("bndbox")
        boxes.append([
            obj.findtext("name"),
            float(bndbox.findtext("xmin")),
            float(bndbox.findtext("ymin")),
            float(bndbox.findtext("xmax")),
            float(bndbox.findtext("ymax")),
        ])

    return [image_info, boxes]

def findMistake(image, im_id, catid2name, threshold, bboxes):
    """
    Compare one image's predicted bboxes against its Pascal-VOC ground
    truth and, when they disagree, save a visualization of the predictions
    under ``wrongSample/``.

    A sample is considered wrong when:
      * the number of predictions differs from the number of GT boxes, or
      * the sorted class labels differ at any position, or
      * some predicted box overlaps no GT box with IoU >= 0.5.

    Args:
        image: path of the source image file.
        im_id: image id used to select this image's detections in ``bboxes``.
        catid2name: dict mapping category id to readable class name.
        threshold: minimum score for a detection to be considered.
        bboxes: list of detection dicts (``image_id``, ``category_id``,
            ``bbox`` as [x, y, w, h], ``score``).
    """
    xmlName = image[image.rfind("/") + 1:image.rfind(".")]
    # NOTE(review): ground-truth location is hard-coded; adjust when the
    # dataset moves.
    vocPath = "/media/oldzhang/Data&Model&Course/data/foodDete/annotations/" + xmlName + ".xml"
    vocData = getMessageFromVoc(vocPath)
    gtObjs = vocData[1]
    imagePath = image[:image.rfind("/") + 1]

    def _save_wrong_sample():
        # Render all of this image's predictions and store the result for
        # manual review. Create the output directory first — previously a
        # missing ``wrongSample/`` made ``image.save`` raise.
        if not os.path.exists("wrongSample"):
            os.makedirs("wrongSample")
        img = cv2.imread(imagePath + xmlName + ".jpg")
        img = draw_bbox(img, im_id, catid2name, bboxes, threshold)
        logger.info("Detection bbox results save in {}".format(xmlName + ".jpg"))
        img.save("wrongSample/" + xmlName + ".jpg", quality=95)

    # Collect this image's predictions above the score threshold as
    # [label, xmin, ymin, xmax, ymax] entries (same layout as the GT list).
    preData = []
    for dt in np.array(bboxes):
        if im_id != dt['image_id']:
            continue
        catid, bbox, score = dt['category_id'], dt['bbox'], dt['score']
        if score < threshold:
            continue
        xmin, ymin, w, h = bbox
        text = "{}".format(catid2name[catid])
        preData.append([text, xmin, ymin, xmin + w, ymin + h])

    # A different box count is an immediate mismatch.
    if len(preData) != len(gtObjs):
        _save_wrong_sample()
        return

    # Same count: compare the sorted label sequences position by position.
    preData = sorted(preData)
    gtObjs = sorted(gtObjs)
    for pred, gt in zip(preData, gtObjs):
        if pred[0] != gt[0]:
            _save_wrong_sample()
            return

    # Labels agree: every prediction must overlap some GT box well enough.
    for pred in preData:
        rec1 = [pred[1], pred[2], pred[3], pred[4]]
        iou = 0
        for gt in gtObjs:
            rec2 = [gt[1], gt[2], gt[3], gt[4]]
            iou = max(iou, compute_IOU(rec1, rec2))
        if iou < 0.5:
            _save_wrong_sample()
            return
def draw_bbox(image, im_id, catid2name, bboxes, threshold):
    """
    Draw predicted bounding boxes and class labels on an image.

    Args:
        image: BGR image array (as returned by ``cv2.imread``).
        im_id: id of the image; only detections with a matching
            ``image_id`` are drawn.
        catid2name: dict mapping category id to readable class name.
        bboxes: list of detection dicts with keys ``image_id``,
            ``category_id``, ``bbox`` ([x, y, w, h]) and ``score``.
        threshold: minimum score a detection needs to be drawn.

    Returns:
        PIL.Image.Image with the boxes and labels rendered.
    """
    image = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    draw = ImageDraw.Draw(image)

    # Load the label font once (previously reloaded for every box) and fall
    # back to PIL's built-in font when the TrueType file is unavailable,
    # instead of crashing the whole run.
    try:
        font = ImageFont.truetype('fonts/simkai.ttf', 40, encoding="utf-8")
    except IOError:
        font = ImageFont.load_default()

    for dt in np.array(bboxes):
        if im_id != dt['image_id']:
            continue
        catid, bbox, score = dt['category_id'], dt['bbox'], dt['score']
        if score < threshold:
            continue

        # bbox comes as [xmin, ymin, width, height].
        xmin, ymin, w, h = bbox
        xmax = xmin + w
        ymax = ymin + h

        # draw bbox outline in red
        draw.line(
            [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin),
             (xmin, ymin)],
            width=2,
            fill=(255, 0, 0))

        # draw "<label> <score>" in blue at the box's top-left corner
        text = "{} {:.2f}".format(catid2name[catid], score)
        draw.text((xmin, ymin), text, (0, 0, 255), font=font)
    return image
def compute_IOU(rec1, rec2):
    """
    Compute the intersection-over-union of two axis-aligned rectangles.

    :param rec1: (x0, y0, x1, y1) where (x0, y0) is the top-left corner
        and (x1, y1) the bottom-right corner. Same for rec2.
    :param rec2: (x0, y0, x1, y1)
    :return: IoU value; 0 when the rectangles do not overlap.
    """
    inter_left = max(rec1[0], rec2[0])
    inter_right = min(rec1[2], rec2[2])
    inter_top = max(rec1[1], rec2[1])
    inter_bottom = min(rec1[3], rec2[3])

    # Degenerate or empty intersection: no overlap at all.
    if inter_left >= inter_right or inter_bottom <= inter_top:
        return 0

    area1 = (rec1[2] - rec1[0]) * (rec1[3] - rec1[1])
    area2 = (rec2[2] - rec2[0]) * (rec2[3] - rec2[1])
    inter_area = (inter_bottom - inter_top) * (inter_right - inter_left)
    return inter_area / (area1 + area2 - inter_area)

if __name__ == '__main__':
    enable_static_mode()
    parser = ArgsParser()
    parser.add_argument(
        "--infer_dir",
        type=str,
        default="/media/oldzhang/Data&Model&Course/data/foodDete/images/",
        help="Directory for images to perform inference on.")
    parser.add_argument(
        "--infer_img",
        type=str,
        default=None,
        help="Image path, has higher priority over --infer_dir")
    parser.add_argument(
        "--output_dir",
        type=str,
        default="output",
        help="Directory for storing the output visualization files.")
    parser.add_argument(
        "--draw_threshold",
        type=float,
        default=0.5,
        help="Threshold to reserve the result for visualization.")
    # NOTE: argparse's type=bool treats any non-empty string (including
    # "False") as True, so "--use_vdl False" used to enable VisualDL.
    # Parse the common truthy spellings explicitly instead.
    parser.add_argument(
        "--use_vdl",
        type=lambda v: str(v).lower() in ("1", "true", "t", "yes", "y"),
        default=False,
        help="whether to record the data to VisualDL.")
    parser.add_argument(
        '--vdl_log_dir',
        type=str,
        default="vdl_log_dir/image",
        help='VisualDL logging directory for image.')
    FLAGS = parser.parse_args()
    main()
