from typing import DefaultDict
import torch
import json
import xml
from pathlib import Path
import pandas as pd 
from xml.dom.minidom import parse
import os

from collections import defaultdict
# Absolute directory containing this script; used to locate bundled data dirs.
dir_root = os.path.dirname(os.path.abspath(__file__))
def get_file_name(base_root):
    """Return the annotated image's file name from the first <filename> tag.

    Args:
        base_root: document element of a parsed annotation XML.
    """
    filename_node = base_root.getElementsByTagName('filename')[0]
    return filename_node.childNodes[0].nodeValue



def get_entity(base_root, num):
    """Return the label of the num-th <name> tag with its numeric prefix removed.

    Labels look like "<index>-<part>-<part>"; the leading index is dropped
    and the remaining parts are joined with underscores.

    Args:
        base_root: document element of a parsed annotation XML.
        num: index of the <name> element to read.
    """
    raw_label = base_root.getElementsByTagName('name')[num].childNodes[0].nodeValue
    parts = raw_label.split("-")[1:]
    return "_".join(parts)


def get_box(base_root, num):
    """Return the num-th polygon's four corner points as a flat list of ints.

    Order: [ld_x, ld_y, rd_x, rd_y, ru_x, ru_y, lu_x, lu_y]; each coordinate
    is parsed via int(float(...)) to tolerate "12.0"-style values.

    Args:
        base_root: document element of a parsed annotation XML.
        num: index of the <polygon> element to read.
    """
    # Hoist the repeated getElementsByTagName lookup out of the per-corner code.
    polygon = base_root.getElementsByTagName('polygon')[num]
    coords = []
    # The DOM child list interleaves whitespace text nodes with elements,
    # hence the odd indices: the four <pt> children sit at 1, 3, 5, 7 and
    # each point's <x>/<y> children sit at 1 and 3.
    for point_idx in (1, 3, 5, 7):
        point = polygon.childNodes[point_idx]
        for coord_idx in (1, 3):
            value = point.childNodes[coord_idx].childNodes[0].nodeValue
            coords.append(int(float(value)))
    return coords

def get_each_file_info(file_name, base_dir="/home/zhangqianghao/Documents/JXLZ/shanghai_med/default"):
    """Parse the annotation XML paired with *file_name* and collect its boxes.

    Args:
        file_name: image file name, e.g. "med_inv_0.jpg"; the matching XML is
            located by swapping the "jpg" suffix for "xml".
        base_dir: directory holding the annotation XML files (defaults to the
            original hard-coded location for backward compatibility).

    Returns:
        A list of [x1, y1, x2, y2, x3, y3, x4, y4, entity_label] entries.
    """
    file_path = "{}/{}".format(base_dir, file_name.replace("jpg", 'xml'))
    root = parse(file_path).documentElement
    num_entities = len(root.getElementsByTagName('name'))
    bboxes = []
    # NOTE(review): range(num_entities - 1) skips the last <name> entry —
    # confirm whether the final annotation is intentionally ignored.
    for num in range(num_entities - 1):
        each_box = get_box(root, num)
        each_box.append(get_entity(root, num))
        bboxes.append(each_box)
    return bboxes

def get_ocr_result(file_name, base_dir="/home/zhangqianghao/Documents/JXLZ/ocr_result"):
    """Load the OCR JSON for *file_name* and flatten it to coordinate rows.

    Args:
        file_name: image file name; the matching JSON is located by swapping
            the "jpg" suffix for "json".
        base_dir: directory holding the OCR result files (defaults to the
            original hard-coded location for backward compatibility).

    Returns:
        One list per OCR item: the flattened corner coordinates from "pos",
        followed by the recognized "text" string.
    """
    file_path = "{}/{}".format(base_dir, file_name.replace('jpg', 'json'))
    with open(file_path, 'r', encoding="utf-8") as fr:
        ocr_result = json.load(fr)

    positions_text = []
    for item in ocr_result:
        # "pos" is a list of corner points; flatten them into one row.
        row = [coord for box in item["pos"] for coord in box]
        row.append(item["text"])
        positions_text.append(row)
    return positions_text

def bb_intersection_over_union(box1, box2):
    """Compute IoU of two axis-aligned boxes given as 8-value polygons.

    Only the lower-left (indices 0, 1) and upper-right (indices 4, 5)
    corners are used, i.e. the polygons are treated as rectangles.

    Args:
        box1, box2: [x1, y1, x2, y2, x3, y3, x4, y4] corner lists.

    Returns:
        IoU in [0, 1]; 0.0 when the boxes do not overlap.
    """
    boxA = [box1[0], box1[1], box1[4], box1[5]]
    boxB = [box2[0], box2[1], box2[4], box2[5]]
    # (x, y)-coordinates of the intersection rectangle.
    xA = max(boxA[0], boxB[0])
    yA = max(boxA[1], boxB[1])
    xB = min(boxA[2], boxB[2])
    yB = min(boxA[3], boxB[3])

    # Clamp to zero: without it, disjoint boxes yield *two negative* extents
    # whose product is positive, reporting a bogus overlap.
    # (+1 keeps the original pixel-inclusive area convention.)
    interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)

    # Areas of both rectangles, same pixel-inclusive convention.
    boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
    boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)

    # Intersection divided by union (sum of areas minus intersection).
    return interArea / float(boxAArea + boxBArea - interArea)

def match_text(file_name):
    """Pair each CVAT annotation box with every overlapping OCR box for one image.

    Args:
        file_name: image file name used to locate both the annotation XML
            and the OCR JSON.

    Returns:
        (matched_result, num_matched): each match is the OCR box coordinates
        followed by the OCR text and the annotation label.
    """
    annotations = get_each_file_info(file_name)
    ocr_rows = get_ocr_result(file_name)
    matched_result = []

    for annotation in annotations:
        annot_box, label = annotation[:-1], annotation[-1]

        for ocr_row in ocr_rows:
            ocr_box, text = ocr_row[:-1], ocr_row[-1]

            # Any overlap above 10% IoU counts as a match.
            if bb_intersection_over_union(annot_box, ocr_box) > 0.1:
                matched_result.append(ocr_box + [text, label])

    return matched_result, len(matched_result)


def get_entity_text(matched_data):
    """Concatenate the matched OCR texts for each entity label.

    Args:
        matched_data: rows shaped [..., text, label].

    Returns:
        defaultdict mapping label -> accumulated texts, each prefixed with a
        comma (preserves the original output format).
    """
    result_dict = defaultdict(str)
    for row in matched_data:
        result_dict[row[-1]] += ',' + row[-2]
    return result_dict

def get_all_entities(path_dir):
    """Collect the distinct entity keys from every *.txt JSON file in path_dir.

    Args:
        path_dir: pathlib.Path of the directory holding the entity files.

    Returns:
        List of unique keys (order unspecified, as with the original set pass).
    """
    seen = set()
    for txt_file in path_dir.glob("*.txt"):
        with open(txt_file, 'r', encoding='utf-8') as fh:
            seen.update(json.load(fh).keys())
    return list(seen)

def justify_image(jpg_file):
    """Return True when the JPEG file ends with the EOI marker (0xFFD9).

    A missing marker indicates a truncated/damaged image. Files shorter than
    two bytes are reported as damaged (False) instead of letting
    f.seek(-2, SEEK_END) raise OSError as the original did.

    Args:
        jpg_file: path to the image file.
    """
    with open(jpg_file, 'rb') as f:
        f.seek(0, os.SEEK_END)
        if f.tell() < 2:
            return False
        f.seek(-2, os.SEEK_END)
        return f.read() == b'\xff\xd9'

if __name__=="__main__":
    ocr_result = get_ocr_result("med_inv_1.jpg")
    file_name = "med_inv_1.tsv"
    ocr_save_path = "/home/zhangqianghao/Documents/python-project/mypick/pipeline/boxes_and_transcripts/{}".format(file_name)
    ocr_df = pd.DataFrame(ocr_result)
    ocr_df.to_csv(ocr_save_path, header=False, sep=',', mode='w')
    # file_name = "med_inv_102.jpg"
    # image_path ="/home/zhangqianghao/Documents/JXLZ/shanghai_med/default/{}".format(file_name)
    # print(justify_image(image_path))

    # file_names = ["med_inv_1.jpg",
    #               "med_inv_10.jpg",
    #               "med_inv_100.jpg",
    #               "med_inv_102.jpg",
    #               "med_inv_0.jpg"]
    # for file_name in file_names:
    #     matched_data,_= match_text(file_name)
    #     tsv_dir = "data/data_examples_root/boxes_and_transcripts/"
    #     json_dir = "data/data_examples_root/entities/"
    #     tsv_file = tsv_dir + file_name.replace("jpg","tsv")
    #     json_file = json_dir + file_name.replace("jpg","txt")
    #
    #     tsv_df = pd.DataFrame(matched_data)
    #     json_result = get_entity_text(matched_data)
    #
    #
    #     with open(json_file,"w",encoding="utf-8") as fw:
    #         json.dump(json_result,fw,indent=1,ensure_ascii=False)
    #
    #     tsv_df.to_csv(tsv_file,header=False,sep=',',mode='w')

    # path_dir = Path(dir_root+"/data/data_examples_root/entities/")
    # entities_list_output_dir="data/entity_list/"
    # entities_list_output=list(set(get_all_entities(path_dir)))
    # with open(entities_list_output_dir+"entities_list.txt",'w',encoding="utf-8") as fw:
    #     for line in entities_list_output:
    #         fw.write(str(line)+",")
    #         fw.write("\n")

    