import cv2
import numpy as np


def get_result_points(p1, p2, p3, p4):
    """Extrapolate the four corners of the result window from QR-code corners.

    The ratios (61/46, 15/46, 55/23, 23/64) encode the fixed geometry between
    the QR code and the result window on the test strip.  Each corner is
    derived by stepping from a known point along the QR edge vectors, with
    coordinates truncated to int at every step (matching the original math).
    `p3` is accepted for a uniform call signature but is not needed.
    """
    # Edge vectors of the QR code: p1->p2 (top edge) and p1->p4 (side edge).
    edge12 = (p2[0] - p1[0], p2[1] - p1[1])
    edge14 = (p4[0] - p1[0], p4[1] - p1[1])

    def step(px, py, vec, k):
        # Move (px, py) by k * vec and truncate each coordinate toward zero.
        return int(px + k * vec[0]), int(py + k * vec[1])

    # Anchor points on p4's side of the QR code, shifted along the top edge.
    near = step(p4[0], p4[1], edge12, -15 / 46)
    far = step(p4[0], p4[1], edge12, 61 / 46)

    # Project the anchors along the side edge to the window's near/far rows.
    r4 = step(near[0], near[1], edge14, 55 / 23)
    r3 = step(far[0], far[1], edge14, 55 / 23)
    r1 = step(near[0], near[1], edge14, 23 / 64)
    r2 = step(far[0], far[1], edge14, 23 / 64)

    return r1, r2, r3, r4


class Photo:
    """An editable photo backed by an OpenCV BGR image.

    Loads the image from ``filepath``, downscales oversized images, and keeps
    an untouched copy in ``self.backup`` so edits can be reverted with
    ``origin()``.  All editing methods operate in place on ``self.img``.
    """

    def __init__(self, filepath):
        """Load the image from *filepath*; raise FileNotFoundError if unreadable."""
        self.get = None  # becomes True after a successful detectAntigen()
        self.k1 = None   # NOTE(review): k1/k2 are never used in this file — presumably set by callers; confirm
        self.k2 = None
        self.img = cv2.imread(filepath)
        # cv2.imread reports failure by returning None instead of raising,
        # which would otherwise surface later as an opaque AttributeError.
        if self.img is None:
            raise FileNotFoundError("cannot read image: " + str(filepath))
        if self.img.shape[0] > 2500 or self.img.shape[1] > 2500:
            # Halve very large images to keep the editing operations fast.
            self.resize(50)
        self.backup = self.img.copy()

    def image(self):
        """Return the current (possibly edited) image array."""
        return self.img

    # Rotate 90 degrees counter-clockwise; img's shape changes (rows/cols swap).
    def rotate(self):
        # np.rot90 is the vectorized equivalent of the former per-pixel loop
        # (new[cols-1-c, r] = old[r, c]); ascontiguousarray keeps the buffer
        # layout OpenCV-friendly.
        self.img = np.ascontiguousarray(np.rot90(self.img))

    # Horizontal mirror (flip around the vertical axis)
    def horizontal_mirror(self):
        self.img = cv2.flip(self.img, 1, dst=None)

    # Vertical mirror (flip around the horizontal axis)
    def vertical_mirror(self):
        self.img = cv2.flip(self.img, 0, dst=None)

    # ratio: [1, 500], percentage of the current size
    def resize(self, ratio):
        self.img = cv2.resize(self.img, dsize=None, fx=ratio / 100.0, fy=ratio / 100.0, interpolation=cv2.INTER_CUBIC)

    # value: [-100, 100]
    def lighting(self, value):
        """Gamma-correct brightness: normalize to [0, 1], raise to 10^(-value/50), rescale."""
        gamma = pow(10, -value / 50)
        gamma_table = [np.power(x / 255.0, gamma) * 255.0 for x in range(256)]
        gamma_table = np.round(np.array(gamma_table)).astype(np.uint8)
        # Apply the mapping with OpenCV's lookup-table function.
        self.img = cv2.LUT(self.img, gamma_table)

    # value: [-100, 100]
    def contrast(self, value):
        """Scale pixel values around the image mean so overall brightness is kept."""
        scaled = pow(10, value / 50) * self.img.astype(np.float64)
        scaled += np.mean(self.img) - np.mean(scaled)
        # Clip in float and cast explicitly: np.clip(..., out=uint8_buffer)
        # with float input raises a same-kind casting error on modern NumPy.
        self.img = np.clip(scaled, 0, 255).astype(np.uint8)

    # value: [-100, 100]
    def saturate(self, value):
        """Scale the HSV saturation channel by 10^(value/50)."""
        hsv = cv2.cvtColor(self.img, cv2.COLOR_BGR2HSV)
        # Clip BEFORE writing back: assigning floats > 255 into the uint8
        # channel wraps modulo 256 instead of saturating.
        s = hsv[:, :, 1].astype(np.float64) * pow(10, value / 50)
        hsv[:, :, 1] = np.clip(s, 0, 255).astype(np.uint8)
        self.img = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

    # value: [0, 180]
    def hue(self, value):
        """Shift the hue channel by *value*, wrapping in OpenCV's [0, 180) range."""
        hsv = cv2.cvtColor(self.img, cv2.COLOR_BGR2HSV)
        # Shift in int32 to avoid uint8 overflow; same net result as the old
        # incremental "+70 per iteration" loop.
        hsv[:, :, 0] = (hsv[:, :, 0].astype(np.int32) + value) % 180
        self.img = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

    # value: [0, 20]
    def blur(self, value):
        # Kernel size must be odd, hence 2*value + 1.
        self.img = cv2.blur(self.img, (2 * value + 1, 2 * value + 1))

    # value: [0, 20]
    def median_blur(self, value):
        self.img = cv2.medianBlur(self.img, 2 * value + 1)

    # value: [0, 20]
    def gaussian_blur(self, value):
        self.img = cv2.GaussianBlur(self.img, (2 * value + 1, 2 * value + 1), 0)

    def to_gray(self):
        """Convert to grayscale while keeping a 3-channel BGR layout."""
        gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
        self.img = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)

    def invert(self):
        """Photographic negative: every uint8 channel value v becomes 255 - v."""
        self.img = 255 - self.img

    def to_binary_adaptive(self):
        """Binarize with a Gaussian adaptive threshold (11x11 window, C=2)."""
        gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
        gray = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
        self.img = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)

    def to_binary_ostu(self):
        """Binarize with Otsu's automatically chosen global threshold."""
        gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
        ret, gray = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        self.img = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)

    # threshold: [0, 127]
    def to_binary(self, threshold):
        """Binarize with a fixed global threshold."""
        gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
        ret, gray = cv2.threshold(gray, threshold, 255, cv2.THRESH_BINARY)
        self.img = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)

    # value: [0, 20]
    def sharpen(self, value):
        """Unsharp masking: amplify the difference from a Gaussian-blurred copy."""
        blur = cv2.GaussianBlur(self.img, (5, 5), 0)
        self.img = cv2.addWeighted(self.img, value + 1, blur, -value, 0)

    def open(self):
        """Otsu-binarize, then morphologically open with a 5x5 cross kernel."""
        gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
        ret, gray = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (5, 5))
        open_img = cv2.morphologyEx(gray, cv2.MORPH_OPEN, kernel)
        self.img = cv2.cvtColor(open_img, cv2.COLOR_GRAY2BGR)

    def close(self):
        """Otsu-binarize, then morphologically close with a 4x4 cross kernel."""
        gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
        ret, gray = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (4, 4))
        close = cv2.morphologyEx(gray, cv2.MORPH_CLOSE, kernel)
        self.img = cv2.cvtColor(close, cv2.COLOR_GRAY2BGR)

    def origin(self):
        """Discard all edits and restore the originally loaded image."""
        # Restore a COPY: sharing the buffer would let later in-place edits
        # corrupt the backup itself.
        self.img = self.backup.copy()

    def scale(self):
        """Boost contrast/brightness: |1.5 * v + 2| saturated to uint8."""
        # Keyword arguments are required here: passed positionally, 1.5 would
        # land in convertScaleAbs's `dst` slot instead of `alpha`.
        self.img = cv2.convertScaleAbs(self.img, alpha=1.5, beta=2)

    def equalizeHist(self):
        """Equalize the grayscale histogram, keeping a 3-channel layout."""
        gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
        gray_equalize = cv2.equalizeHist(gray)
        self.img = cv2.cvtColor(gray_equalize, cv2.COLOR_GRAY2BGR)

    def copy(self, o):
        """Paste a fixed 19x32 patch of image *o* into this image, then blur."""
        # NOTE(review): the regions are hard-coded — presumably tuned for one
        # specific test-strip layout; confirm before reusing elsewhere.
        patch = o[108:127, 79:111]
        self.img[187:206, 79:111] = patch[:19, :32]
        self.gaussian_blur(3)

    def detectAntigen(self):
        """Find a QR code, warp the predicted result window to 190x320, binarize.

        Sets ``self.get = True`` on success; prints a message otherwise.
        """
        qrDecoder = cv2.QRCodeDetector()
        # Preprocess (gray + light blur + contrast boost) to aid decoding.
        self.to_gray()
        self.gaussian_blur(1)
        self.scale()
        data, bbox, rectifiedImage = qrDecoder.detectAndDecode(self.img)
        if len(data) > 0:
            # Extrapolate the result-window corners from the QR-code corners.
            r1, r2, r3, r4 = get_result_points(bbox[0][0], bbox[0][1],
                                               bbox[0][2], bbox[0][3])

            # Map the window corners onto a fixed 190x320 canvas.
            src = np.float32([list(r1), list(r2), list(r3), list(r4)])
            dst = np.float32([[0, 0], [190, 0], [190, 320], [0, 320]])
            m = cv2.getPerspectiveTransform(src, dst)

            # Warp from the untouched backup so the preprocessing above does
            # not leak into the output.
            self.img = cv2.warpPerspective(self.backup, m, (190, 320))
            self.to_binary_adaptive()
            self.get = True
        else:
            print("QR Code not detected")