from src.utils.basic_utils import DetectResult, DetectedBox
from src.utils.image_utils import get_homography
import cv2
import numpy as np


def __cross(p1: list, p2: list, p3: list):
    """Cross product of vectors (p1->p2) and (p1->p3).

    Positive when p3 lies to the left of the directed line p1->p2,
    negative when to the right, zero when the three points are collinear.
    Used as the "straddle test" primitive for segment intersection.
    """
    ax, ay = p2[0] - p1[0], p2[1] - p1[1]
    bx, by = p3[0] - p1[0], p3[1] - p1[1]
    return ax * by - bx * ay


def __is_intersec(p1, p2, p3, p4):
    """Return True if segment p1-p2 intersects segment p3-p4.

    Two-stage test: a fast axis-aligned bounding-box rejection, then the
    cross-product straddle test.

    :param p1, p2: endpoints of the first segment, [x, y]
    :param p3, p4: endpoints of the second segment, [x, y]
    :return: bool
    """
    # Fast rejection: the bounding rectangles of the two segments must
    # overlap, otherwise the segments cannot intersect.
    # BUGFIX: the original first term read max(p1[0], p2[1]), mixing an
    # x coordinate with a y coordinate; it must compare x with x.
    if (max(p1[0], p2[0]) >= min(p3[0], p4[0])  # rect 1 right edge >= rect 2 left edge
            and max(p3[0], p4[0]) >= min(p1[0], p2[0])  # rect 2 right edge >= rect 1 left edge
            and max(p1[1], p2[1]) >= min(p3[1], p4[1])  # rect 1 top >= rect 2 bottom
            and max(p3[1], p4[1]) >= min(p1[1], p2[1])):  # rect 2 top >= rect 1 bottom

        # Straddle test: each segment's endpoints must lie on opposite
        # sides of (or on) the line through the other segment.
        return (__cross(p1, p2, p3) * __cross(p1, p2, p4) <= 0
                and __cross(p3, p4, p1) * __cross(p3, p4, p2) <= 0)
    return False


def __is_poly_overlap(left_poly, right_poly):
    """
    Return True if the two polygons overlap.

    Overlap is detected either by an edge-edge intersection or by one
    polygon's vertex lying strictly inside the other.
    :param left_poly: list of [x, y] vertices
    :param right_poly: list of [x, y] vertices
    :return: bool
    """
    # Any crossing pair of edges means the outlines intersect.
    # Indexing with idx-1 wraps to the last vertex, closing the polygon.
    for l_idx in range(len(left_poly)):
        for r_idx in range(len(right_poly)):
            if __is_intersec(right_poly[r_idx], right_poly[r_idx - 1],
                             left_poly[l_idx], left_poly[l_idx - 1]):
                return True

    # No edges cross: one polygon may still contain the other entirely,
    # so test the vertices of each against the other's contour.
    def _any_point_inside(container, points):
        contour = np.array(container, dtype=np.int32).reshape((-1, 1, 2))
        return any(cv2.pointPolygonTest(contour, tuple(pt), False) > 0
                   for pt in points)

    return (_any_point_inside(left_poly, right_poly)
            or _any_point_inside(right_poly, left_poly))


def __merge_detect_result(transformed: DetectResult, left: DetectResult):
    """
    Stitch two DetectResults into one.

    :param transformed: DetectResult already warped by the homography
    :param left: the other DetectResult to merge in (pasted top-left)
    :return: merged DetectResult; inputs are left unmodified
    """
    # Paste `left` into the top-left corner, then fill every still-black
    # pixel from `transformed`. Vectorized: the original per-pixel Python
    # double loop was O(H*W) interpreted iterations and very slow.
    dst_img = np.zeros(transformed.img.shape, dtype=np.uint8)
    dst_img[0:left.img.shape[0], 0:left.img.shape[1]] = left.img
    black = (dst_img == 0).all(axis=2)  # all three channels zero
    dst_img[black] = transformed.img[black]

    # De-duplicate: mark any transformed box whose polygon overlaps a box
    # already present in `left`.
    deleted_flag = [False] * len(transformed.positions)
    for i, right_poly in enumerate(transformed.positions):
        # right_poly: [[x, y], [x, y], [x, y], ...]
        for left_poly in left.positions:
            if __is_poly_overlap(left_poly, right_poly):
                deleted_flag[i] = True
                break  # one overlap is enough to discard this box

    # BUGFIX: copy left's lists instead of aliasing them — the original
    # appended straight onto left.positions/scores/labels, mutating the
    # caller's `left` argument in place.
    final_positions = list(left.positions)
    final_scores = list(left.scores)
    final_labels = list(left.labels)
    for i, poly in enumerate(transformed.positions):
        if deleted_flag[i]:
            continue
        final_positions.append(poly)
        final_scores.append(transformed.scores[i])
        final_labels.append(transformed.labels[i])

    return DetectResult(dst_img, [DetectedBox(p, s, l)
                                  for p, s, l in zip(final_positions, final_scores, final_labels)])


def __warp_perspective(detect_result: DetectResult, homo: np.array, dsize: tuple):
    """
    Apply a perspective transform to a DetectResult: warps not only the
    image but also the box coordinates.

    :param detect_result: data to transform, DetectResult type
    :param homo: 3x3 homography matrix
    :param dsize: output image size (width, height)
    :return: transformed DetectResult
    """
    img = cv2.warpPerspective(detect_result.img, homo, dsize)
    warped_positions = []
    for pos in detect_result.positions:  # pos: [[x, y], [x, y], [x, y], ...]
        tmp = []
        for pt in pos:  # pt: [x, y]
            wp = np.dot(homo, np.array([pt[0], pt[1], 1], dtype=np.float64))
            # BUGFIX: use true division for the homogeneous normalization.
            # The original used //, which floor-divides — inconsistent with
            # __calc_corners and biased (especially for negative coords).
            tmp.append([wp[0] / wp[2], wp[1] / wp[2]])
        warped_positions.append(tmp)
    return DetectResult(img, [DetectedBox(p, s, l)
                              for p, s, l in zip(warped_positions,
                                                 detect_result.scores,
                                                 detect_result.labels)])


def __calc_corners(homo: np.array, img_shape: tuple):
    """
    Compute the four corner coordinates of the transformed image.

    :param homo: 3x3 homography matrix
    :param img_shape: source image shape, (height, width[, ...])
    :return: [[x, y], ...] in order: top-left, bottom-left,
             top-right, bottom-right
    """
    h, w = img_shape[0], img_shape[1]
    # The original repeated the same projection four times (with
    # inconsistent dtypes); one loop over the corners is equivalent.
    ret = []
    for x, y in ([0, 0], [0, h], [w, 0], [w, h]):
        p = np.dot(homo, np.array([x, y, 1], dtype=np.float64))
        ret.append([p[0] / p[2], p[1] / p[2]])  # homogeneous normalization
    return ret


def _stitch_imgs(imgs: list, working_size):
    """
    Stitch a list of DetectResult images into a single panorama by warping
    every image onto the middle one and merging the detections.

    :param imgs: list of DetectResult, assumed ordered left to right
    :param working_size: target working height; inputs taller than this
        are shrunk before stitching and the result is scaled back up
    :return: stitched DetectResult
    """
    # Shrink images and coordinates down to the working size.
    multiple = imgs[0].img.shape[0] / working_size
    if multiple > 1:
        for i in range(len(imgs)):
            imgs[i].zoom(1/multiple)

    img_num = len(imgs)
    center = img_num // 2  # index of the reference (middle) image
    img_gray = []
    for i in range(img_num):
        img_gray.append(cv2.cvtColor(imgs[i].img, cv2.COLOR_BGR2GRAY))

    # Homography from every image to its neighbour on the center side.
    homo_neig = []
    for i in range(img_num):
        if i < center:
            homo_neig.append(
                get_homography(img_gray[i], img_gray[i + 1]))
        elif i > center:
            homo_neig.append(
                get_homography(img_gray[i], img_gray[i - 1]))
        else:
            # Center against itself: effectively an identity transform.
            homo_neig.append(
                get_homography(img_gray[i], img_gray[center]))

    # Chain the neighbour homographies into per-image -> center homographies.
    homo_cen = []
    for i in range(img_num):
        if i < center:
            h = homo_neig[center - 1]
            for j in range(center - 2, i - 1, -1):
                h = np.dot(h, homo_neig[j])
        elif i > center:
            h = homo_neig[center + 1]
            for j in range(center + 2, i + 1):
                h = np.dot(h, homo_neig[j])
        else:
            h = homo_neig[center]
        homo_cen.append(h)

    # Shift the leftmost image right by `center` center-image widths so the
    # warped images land at non-negative x coordinates.
    h, w = img_gray[center].shape[:2]
    Tx = np.array([[1.0, 0, w], [0, 1.0, 0], [0, 0, 1.0]])
    for i in range(center):
        homo_cen[0] = np.dot(Tx, homo_cen[0])
    corL = __calc_corners(np.array(homo_cen[0]), (img_gray[0].shape[:2]))
    xL = min(int(corL[0][0]), int(corL[1][0]))  # leftmost x after warping
    xL_Tx = np.array([[1.0, 0, -xL], [0, 1.0, 0], [0, 0, 1.0]])

    # Shift every other image right by (center widths + the xL correction).
    for i in range(1, img_num):
        for j in range(center):
            homo_cen[i] = np.dot(Tx, homo_cen[i])
        homo_cen[i] = np.dot(xL_Tx, homo_cen[i])
    homo_cen[0] = np.dot(xL_Tx, homo_cen[0])

    # Warp every image (pixels and box coordinates) onto the center plane.
    imgn2cen = []
    bot = []
    for i in range(img_num):
        coord = __calc_corners(np.array(homo_cen[i]), (imgs[i].img.shape[:2]))
        bot.append(int(max(coord[1][1], coord[3][1])))
        bottom = max(bot)  # tallest bottom edge seen so far sets canvas height
        imgn2cen.append(__warp_perspective(imgs[i], homo_cen[i], (w * img_num, bottom)))

    # Rightmost x of the warped rightmost image — used as the crop boundary.
    corR = __calc_corners(np.array(homo_cen[img_num - 1]), (img_gray[img_num - 1].shape[0:2]))
    xR = max(int(corR[2][0]), int(corR[3][0]))

    # Project all warped images onto one canvas, de-duplicating detections.
    dst = imgn2cen[0]
    for i in range(1, img_num):
        dst = __merge_detect_result(imgn2cen[i], dst)
    out = DetectResult(dst.img[:, :xR], dst.detected_boxes)

    if multiple > 1:
        out.zoom(multiple)  # scale the shrunken result back up

    return out


class ImageStitcher:
    """
    Stitches a sequence of detection results into one panorama,
    removing duplicated detection boxes along the way.
    """

    def __init__(self, detect_results):
        # DetectResult objects to stitch, kept in the order supplied.
        self._detect_results = detect_results

    def get_result(self, working_size):
        """Run the stitch at the given working size and return the panorama."""
        return _stitch_imgs(self._detect_results, working_size)


def main_test():
    """Manual smoke test: stitch three local sample images with hand-made boxes."""
    img_names = ["D:/workspace/stitch_img/imgs/caffe_1/1.jpg",
                 "D:/workspace/stitch_img/imgs/caffe_1/2.jpg",
                 "D:/workspace/stitch_img/imgs/caffe_1/3.jpg"]
    boundingbox = [[[[411, 304], [470, 302], [469, 336], [409, 337]]],
                   [[[177, 334], [234, 331], [232, 364], [176, 364]]],
                   [[[43, 166], [99, 165], [100, 197], [42, 198]]]]
    stitch_imgs = []
    for idx, name in enumerate(img_names):
        img = cv2.imread(name)
        # Work at half resolution to speed the test up.
        img = cv2.resize(img, (img.shape[1] // 2, img.shape[0] // 2))
        stitch_imgs.append(DetectResult(img, [DetectedBox(boundingbox[idx][0], 1, "caffe")]))
        stitch_imgs[-1].show()

    stitcher = ImageStitcher(stitch_imgs)
    # BUGFIX: get_result requires a working_size argument; the original
    # called it with none, which raises TypeError. Passing the current
    # image height keeps the stitch at full (already halved) resolution.
    ret_pano = stitcher.get_result(stitch_imgs[0].img.shape[0])
    print(ret_pano.positions)
    ret_pano.show()


# Script entry point: run the manual smoke test when executed directly.
if __name__ == '__main__':
    main_test()
