import ezdxf, cv2
import math, os, json
from ezdxf.math import Vector as Vec
from collections import namedtuple
import numpy as np

# Side length (in pixels) of the square raster used when drawing plans for matching.
PIXEL = 128
# Stroke width (in pixels) passed to cv2.line when rasterizing wall segments.
LINE_WIDTH = 2


class Line:
    """A plan segment: an id, two endpoints, and cached length/midpoint.

    `start` and `end` are (x, y) or (x, y, z) point sequences; the midpoint
    is always stored as a 3-tuple with z = 0.
    """

    def __init__(self, id, start, end):
        self.id = id
        self.start = start
        self.end = end
        # Cache the Euclidean length once; matching code compares it repeatedly.
        self.length = get_distance(start, end)
        # Midpoint of the segment in the xy-plane, padded to 3-D with z = 0.
        mid_x = (start[0] + end[0]) / 2
        mid_y = (start[1] + end[1]) / 2
        self.center = (mid_x, mid_y, 0)


class Vector(Line):
    """A Line that also answers to the vector-style name ``magnitude``."""

    @property
    def magnitude(self):
        # Alias for Line.length so instances satisfy angle_to()'s interface.
        return self.length


# 2-D offset added to plan coordinates before scaling/rasterizing.
Translate = namedtuple("Translate", ["x", "y"])
# Offset plus uniform scale mapping plan coordinates into the raster grid.
Transform = namedtuple("Transform", ["x_offset", "y_offset", "scale"])


class UGM:
    """A floor plan loaded from a UGM JSON file.

    Attributes (filled by read_ugm):
        boundingBox: ezdxf BoundingBox over all room corner points.
        lines: Line objects, one per edge of every room polygon.
        location_mapping: point id -> location, from the file's "point" list.
        roomNum: number of rooms in the file.
        rooms: list of each room's vertex list (polygon corners).
    """

    def __init__(self, path):
        self.boundingBox = None
        self.path = path
        self.lines = []
        self.name = os.path.basename(path)  # bare file name, no directories
        self.location_mapping = {}
        self.roomNum = 0
        self.rooms = []
        self.read_ugm()  # parse eagerly so the object is ready after construction

    def read_ugm(self):
        """Parse the JSON at self.path and populate rooms/lines/boundingBox/location_mapping."""
        with open(self.path, 'r', encoding='utf8') as f:
            ugm_content = json.load(f)

        self.roomNum = len(ugm_content["room"])
        point_dicts = ugm_content['point']
        points = []
        for room in ugm_content["room"]:
            self.rooms.append(room["points"])
            points.extend(room["points"])
            # One Line per polygon edge; the last edge closes the loop back to
            # the first vertex. Line ids are room id + vertex index.
            # NOTE(review): assumes room["id"] is a string — verify against the files.
            for pointIndex in range(len(room["points"])):
                if pointIndex == len(room["points"]) - 1:
                    self.lines.append(Line(room["id"] + str(pointIndex), room["points"][pointIndex], room["points"][0]))
                    continue
                line = Line(room["id"] + str(pointIndex), room["points"][pointIndex], room["points"][pointIndex + 1])
                self.lines.append(line)
                pass
        self.boundingBox = ezdxf.math.BoundingBox(points)
        location_mapping = {p['id']: p['location'] for p in point_dicts}
        self.location_mapping = location_mapping
        pass


def get_distance(pt1, pt2):
    """Return the Euclidean distance between two points of equal dimension.

    Generalized from the original hard-coded 2-D/3-D branches: works for any
    dimension, with identical results for 2-D and 3-D inputs (and no longer
    silently returns None for other lengths).

    Args:
        pt1, pt2: sequences of coordinates of the same length.

    Returns:
        float distance.
    """
    return math.sqrt(sum((a - b) ** 2 for a, b in zip(pt1, pt2)))


def get_img(ugm, scale, translate=Translate(x=0, y=0)):
    """Rasterize every line of *ugm* into a PIXEL x PIXEL uint8 image.

    Each segment is translated, scaled, truncated to integer pixel
    coordinates, then drawn in white (255) with LINE_WIDTH-thick strokes.
    """
    canvas = np.zeros((PIXEL, PIXEL), np.uint8)
    for segment in ugm.lines:
        p0 = (int((segment.start[0] + translate.x) * scale),
              int((segment.start[1] + translate.y) * scale))
        p1 = (int((segment.end[0] + translate.x) * scale),
              int((segment.end[1] + translate.y) * scale))
        canvas = cv2.line(canvas, p0, p1, 255, LINE_WIDTH)
    return canvas


def get_filled_img(polygons, scale, translate=Translate(x=0, y=0)):
    """Rasterize room polygons as filled white regions in a PIXEL x PIXEL image.

    Each vertex is translated, scaled and truncated to integer pixels before
    cv2.fillConvexPoly paints the polygon with value 255.
    """
    canvas = np.zeros((PIXEL, PIXEL), np.uint8)
    for polygon in polygons:
        vertices = np.array(
            [[int((pt[0] + translate.x) * scale), int((pt[1] + translate.y) * scale)]
             for pt in polygon])
        canvas = cv2.fillConvexPoly(canvas, vertices, 255)
    return canvas


def angle_to(v0, v1):
    """Counter-clockwise angle from v0 to v1, in radians, in [0, 2*pi).

    v0 and v1 must expose magnitude/dot/cross (e.g. ezdxf Vec or the local
    Vector class); the sign of the z component of v0 x v1 selects the
    rotation direction.
    """
    z_base = Vec(0, 0, 1)
    m0 = v0.magnitude
    m1 = v1.magnitude
    cos_value = v0.dot(v1) / m0 / m1
    # Signed sine: positive when v1 lies counter-clockwise of v0 (right-hand rule).
    sin_value = v0.cross(v1).dot(z_base) / m0 / m1

    # Clamp values that drift just outside [-1, 1] from floating-point error:
    # int() truncates 1.0000001 -> 1 and -1.0000001 -> -1, keeping asin/acos legal.
    sin_value = int(sin_value) if sin_value < -1 or sin_value > 1 else sin_value
    cos_value = int(cos_value) if cos_value < -1 or cos_value > 1 else cos_value

    if sin_value >= 0:
        # Quadrants 1-2: acos already returns the angle in [0, pi].
        return math.acos(cos_value)
    else:
        if cos_value >= 0:
            # Quadrant 4: asin(sin) is in (-pi/2, 0); shifting by 2*pi maps it to (3*pi/2, 2*pi).
            return math.pi * 2 + math.asin(sin_value)
        else:
            # Quadrant 3: pi - asin(sin) lands in (pi, 3*pi/2).
            return math.pi - math.asin(sin_value)


def get_translate_from_vector(t_line, s_line):
    """Return the xy translation that moves s_line's midpoint onto t_line's."""
    delta_x = t_line.center[0] - s_line.center[0]
    delta_y = t_line.center[1] - s_line.center[1]
    return Translate(x=delta_x, y=delta_y)


def get_downSampling_transform(t_ugm, s_ugm, translate):
    """Compute the offset + scale that fits both plans into the PIXEL raster.

    The source plan's points are shifted by *translate*, the shifted box is
    unioned with the target's bounding box, and the scale is chosen so the
    longer side of the union (plus line-width margins) fits inside PIXEL.
    """
    shifted_pts = [[p[0] + translate.x, p[1] + translate.y, 0]
                   for p in s_ugm.location_mapping.values()]
    union_box = t_ugm.boundingBox.union(ezdxf.math.BoundingBox(shifted_pts))
    # Pull the origin back by one stroke width so drawn lines are not clipped.
    x_offset = union_box.extmin[0] - LINE_WIDTH
    y_offset = union_box.extmin[1] - LINE_WIDTH
    longest_side = max(union_box.size[0], union_box.size[1])
    scale = (PIXEL - LINE_WIDTH - 2) / longest_side
    return Transform(x_offset, y_offset, scale)


def get_credibility_by_img(img1, img2, isRoom=False):
    """Overlap score of two binary images: |img1 AND img2| / max(|img1|, |img2|).

    Args:
        img1, img2: uint8 arrays of identical shape, 0 = background.
        isRoom: unused; kept for backward compatibility with existing callers.

    Returns:
        A float in [0, 1]; 0 when both images are completely blank.
    """
    num_img1 = np.count_nonzero(img1)
    num_img2 = np.count_nonzero(img2)

    denominator = max(num_img1, num_img2)
    if denominator == 0:
        # Both images blank: nothing to compare. This replaces the original
        # bare `except:` that silently caught the ZeroDivisionError.
        return 0

    # np.bitwise_and instead of the dunder call img1.__and__(img2).
    overlap = np.count_nonzero(np.bitwise_and(img1, img2))
    return overlap / denominator


def check_room_matched(t_ugm, s_ugm, translate):
    """Return True when every target room overlaps some source room well enough.

    For each room of t_ugm the best filled-polygon overlap score against all
    rooms of s_ugm (shifted by *translate*) must reach 0.8; failing rooms are
    reported on stdout and make the whole check False.
    """
    dst = get_downSampling_transform(t_ugm, s_ugm, translate)
    t_shift = Translate(x=-dst.x_offset, y=-dst.y_offset)
    s_shift = Translate(x=translate.x - dst.x_offset, y=translate.y - dst.y_offset)

    all_matched = True
    for t_room in t_ugm.rooms:
        target_img = get_filled_img([t_room], dst.scale, t_shift)
        best_score = max(
            get_credibility_by_img(
                target_img, get_filled_img([s_room], dst.scale, s_shift), True)
            for s_room in s_ugm.rooms
        )
        if best_score < 0.8:
            print("room credibility : {}".format(best_score))
            all_matched = False
    return all_matched


def check_parallel(line1, line2):
    """True when the two segments' direction vectors have zero cross product.

    Components are truncated to int (as in the original), so directions that
    differ only in the fractional part can still count as parallel.
    """
    dx1 = int(line1.end[0] - line1.start[0])
    dy1 = int(line1.end[1] - line1.start[1])
    dx2 = int(line2.end[0] - line2.start[0])
    dy2 = int(line2.end[1] - line2.start[1])
    return dx1 * dy2 - dx2 * dy1 == 0


def transform(t_ugm, s_ugm):
    """Search for the translation that best aligns s_ugm onto t_ugm.

    Every pair of sufficiently long (>= 2000 units), parallel, nearly
    equal-length wall lines proposes a translation (midpoint to midpoint);
    each proposal is scored by rasterized line overlap. The best score must
    reach 0.9 and pass the per-room overlap check.

    Returns:
        (credibility, Translate) for the best accepted match, or
        (0, Translate(0, 0)) when nothing qualifies.
    """
    credibility_dict = {}
    for t_line in t_ugm.lines:
        # Short walls are unreliable alignment anchors; skip them.
        if t_line.length < 2000:
            continue
        for s_line in s_ugm.lines:
            if s_line.length < 2000:
                continue
            # Candidate pairs must be parallel and within 1% of each
            # other's length.
            if not check_parallel(t_line, s_line):
                continue
            if max(t_line.length, s_line.length) / min(t_line.length, s_line.length) > 1.01:
                continue

            translate = get_translate_from_vector(t_line, s_line)
            dST = get_downSampling_transform(t_ugm, s_ugm, translate)

            t_default_translate = Translate(x=-dST.x_offset,
                                            y=-dST.y_offset)
            s_default_translate = Translate(x=translate.x - dST.x_offset,
                                            y=translate.y - dST.y_offset)
            img1 = get_img(t_ugm, dST.scale, t_default_translate)
            img2 = get_img(s_ugm, dST.scale, s_default_translate)

            credibility = get_credibility_by_img(img1, img2)
            # NOTE: keyed by score, so equal scores keep only the latest
            # translation — acceptable since any of them scores the same.
            credibility_dict[credibility] = translate

    # Explicit empty check instead of the original bare `except:` around max().
    credibility = max(credibility_dict) if credibility_dict else 0

    print("credibility : {}".format(credibility))
    if credibility < 0.9:
        return 0, Translate(0, 0)

    translate = credibility_dict[credibility]
    if not check_room_matched(t_ugm, s_ugm, translate):
        return 0, Translate(0, 0)
    return credibility, translate
