import cv2
import numpy as np
import pytesseract

# @author: 强睿哲

def teeeeract_API():
    """Run Tesseract OCR on ./static/ref.jpg and save the recognized text.

    Side effects: writes the text to ./static/text_input.txt (UTF-8) and
    echoes it to stdout. Requires a local Tesseract install at the
    hard-coded Windows path below.
    """
    pytesseract.pytesseract.tesseract_cmd = r'D:\Tesseract-OCR\tesseract.exe'
    image = cv2.imread('./static/ref.jpg')
    text = pytesseract.image_to_string(image, lang='eng')

    # Bug fix: the original open()/write()/close() sequence leaked the file
    # handle if write() raised; a context manager guarantees closure.
    with open('./static/text_input.txt', 'w', encoding='utf-8') as out:
        out.write(text)

    print(text)


def pre_solve(image):
    """Shrink *image* to a fixed height of 500 px for fast processing.

    Returns (small, orig, ratio): the resized working copy, an untouched
    copy of the input, and the scale factor needed to map coordinates
    found on the small image back onto the original.
    """
    orig = image.copy()
    # Coordinates detected at height 500 are multiplied by this later
    # to land on the full-resolution original.
    ratio = image.shape[0] / 500.0
    small = resize(orig, height=500)  # width follows proportionally
    return small, orig, ratio


def pre_edged(image):
    """Produce a Canny edge map: grayscale -> 5x5 Gaussian blur -> Canny(75, 200)."""
    # Blurring first suppresses noise that would otherwise show up as
    # spurious edges in the Canny output.
    blurred = cv2.GaussianBlur(
        cv2.cvtColor(image, cv2.COLOR_BGR2GRAY), (5, 5), 0)
    return cv2.Canny(blurred, 75, 200)


def show_pre_solve(image, edged):
    """Display the working image and its edge map; blocks until a key is pressed."""
    for title, img in (("Image", image), ("Edged", edged)):
        cv2.imshow(title, img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()


def contours_detect(edged):
    """Locate the document outline in the edge map *edged*.

    Approximates each of the five largest contours with a polygon and
    returns the first one that has exactly 4 vertices (assumed to be the
    document's quadrilateral border). Returns the int sentinel 1222 when
    no such contour exists — callers distinguish the cases with
    isinstance(result, int).
    """
    # Bug fix: cv2.findContours returns (image, contours, hierarchy) in
    # OpenCV 3.x but (contours, hierarchy) in 2.x/4.x, so the original
    # hard-coded [1] picked the hierarchy under OpenCV 4. Unpack
    # version-agnostically instead.
    found = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = found[0] if len(found) == 2 else found[1]

    # Keep only the five largest contours by area; the document border is
    # expected to be among them.
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]

    for c in cnts:
        # Approximate the contour with tolerance 2% of its perimeter
        # (True = contour is closed).
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)

        # A 4-vertex approximation is very likely the rectangular page.
        if len(approx) == 4:
            return approx

    # Bug fix: the original placed `return 1222` inside the loop body, so
    # only the single largest contour was ever examined and `screenCnt`
    # could be referenced unbound. The sentinel now fires only after all
    # five candidates fail (value kept for caller compatibility).
    return 1222


def show_contours(image, screenCnt):
    """Draw the detected outline onto *image* (in place) and display it."""
    outline_color, thickness = (0, 255, 0), 2
    cv2.drawContours(image, [screenCnt], -1, outline_color, thickness)
    cv2.imshow("Outline", image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()


def show_contours_extra(image):
    """Draw a hard-coded green rectangle on *image* and display it.

    NOTE(review): the two corners (500, 0) and (500, 300) share the same
    x-coordinate, so this draws a zero-width rectangle (a vertical line
    segment). Presumably one corner was meant to differ in x — confirm
    the intended region before relying on this helper.
    """
    # cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[1]
    image = cv2.rectangle(image, (500, 0), (500, 300), (0, 255, 0), 3)
    cv2.imshow("Outline", image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()


def warp_reshape(orig, ratio, screenCnt):
    """Perspective-correct the detected document region and binarize it.

    *screenCnt* holds the 4 corner points found on the downscaled image;
    multiplying by *ratio* maps them back onto the full-resolution *orig*.
    Returns a binary (0/255) grayscale image of the flattened document.
    """
    # 4 corners as (x, y) pairs, rescaled to original-image coordinates.
    corners = screenCnt.reshape(4, 2) * ratio
    warped = four_point_transform(orig, corners)

    # Threshold at 100 to get a clean black-on-white page for OCR.
    gray = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
    _, ref = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY)
    return ref


def warp_show(orig, ref):
    """Display the original photo next to the scanned result; blocks on a key press."""
    cv2.imshow("Original", resize(orig, height=650))
    cv2.imshow("OCR_Scanned", resize(ref, height=650))
    cv2.waitKey(0)
    # Consistency fix: every other show_* helper in this file closes its
    # windows after waitKey; this one previously left them open.
    cv2.destroyAllWindows()


def order_points(pts):
    """Order 4 corner points as [top-left, top-right, bottom-right, bottom-left].

    *pts* is a (4, 2) array of (x, y) coordinates in arbitrary order;
    returns a float32 (4, 2) array in the canonical order above.
    """
    rect = np.zeros((4, 2), dtype="float32")

    print("pts :\n ", pts)
    # x + y is smallest at the top-left corner and largest at the bottom-right.
    s = pts.sum(axis=1)
    print("s : \n", s)
    rect[0] = pts[np.argmin(s)]
    rect[2] = pts[np.argmax(s)]
    print("第一次rect : \n", rect)

    # y - x is smallest at the top-right corner and largest at the bottom-left.
    diff = np.diff(pts, axis=1)
    print("diff : \n", diff)
    rect[1] = pts[np.argmin(diff)]
    rect[3] = pts[np.argmax(diff)]
    print("第二次rect :\n ", rect)
    return rect


def four_point_transform(image, pts):
    """Warp the quadrilateral *pts* in *image* to a fronto-parallel rectangle.

    The output size is taken from the longer of each pair of opposite
    edges, so the flattened document keeps its apparent dimensions.
    """
    # Canonically ordered corners: top-left, top-right, bottom-right, bottom-left.
    rect = order_points(pts)
    (tl, tr, br, bl) = rect

    def _edge(p, q):
        # Euclidean distance between two corner points.
        return np.sqrt(((p[0] - q[0]) ** 2) + ((p[1] - q[1]) ** 2))

    # Target width/height: the longer of the two opposite edges each way.
    maxWidth = max(int(_edge(br, bl)), int(_edge(tr, tl)))
    maxHeight = max(int(_edge(tr, br)), int(_edge(tl, bl)))

    # Destination corners of the output rectangle (-1 keeps the far edge
    # inside the image bounds).
    dst = np.array([
        [0, 0],
        [maxWidth - 1, 0],
        [maxWidth - 1, maxHeight - 1],
        [0, maxHeight - 1]], dtype="float32")

    # 3x3 homography mapping the source quad onto the destination rectangle.
    M = cv2.getPerspectiveTransform(rect, dst)
    print("M:", M)
    print("M.shape:", M.shape)
    return cv2.warpPerspective(image, M, (maxWidth, maxHeight))


def resize(image, width=None, height=None, inter=cv2.INTER_AREA):
    """Resize *image* to the given width OR height, preserving aspect ratio.

    If both dimensions are None the image is returned unchanged; if both
    are given, *width* wins and *height* is derived from the aspect ratio.
    """
    (h, w) = image.shape[:2]
    if width is None and height is None:
        return image

    if width is None:
        # Only height given: scale width by the same factor.
        scale = height / float(h)
        dim = (int(w * scale), height)
    else:
        # Width given: scale height by the same factor.
        scale = width / float(w)
        dim = (width, int(h * scale))

    return cv2.resize(image, dim, interpolation=inter)


if __name__ == '__main__':

    # 在此处选择读入的图片
    image_read = cv2.imread('./static/image.jpg')
    # 4032 x 3024
    # image_read = cv2.resize(image_read, (500, 300))

    # Downscale for processing and compute the edge map.
    image, orig, ratio = pre_solve(image_read)
    edged = pre_edged(image)

    # Show and save the preprocessing result.
    print("STEP 1: 边缘检测")
    show_pre_solve(image, edged)
    cv2.imwrite('./static/edged.jpg', edged)

    print("STEP 2: 获取轮廓")
    screenCnt = contours_detect(edged)

    # contours_detect returns an int sentinel when no 4-point outline was
    # found; in that case skip the perspective correction and OCR the
    # reference image directly.
    if isinstance(screenCnt, int):
        teeeeract_API()
    else:
        # show_contours already draws the outline onto `image` in place;
        # the duplicate cv2.drawContours call that followed it in the
        # original was redundant and has been removed.
        show_contours(image, screenCnt)
        cv2.imwrite('./static/screenCnt.jpg', image)

        print("STEP 3: 变换")
        ref = warp_reshape(orig, ratio, screenCnt)
        warp_show(orig, ref)
        cv2.imwrite('./static/ref.jpg', ref)
        teeeeract_API()