# 轮廓检测
import io
import time

import numpy as np
from PIL import Image
# 轮廓面积
from matplotlib import pyplot as plt

'''边缘检测'''
def canny(image, low_threshold, high_threshold):
    """Run Canny edge detection on a BGR image.

    Returns the binary edge map together with a dict echoing the
    thresholds that were applied.
    """
    # Canny works on a single channel, so collapse BGR to grayscale first.
    grayscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    edge_map = cv2.Canny(grayscale, low_threshold, high_threshold)
    return edge_map, {'low_threshold': low_threshold, 'high_threshold': high_threshold}


def sobel(image):
    """Detect edges with the Sobel operator; return the gradient magnitude."""
    grayscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # First derivatives along x and y, computed in 64-bit float so that
    # negative responses are not clipped before the magnitude is taken.
    grad_x = cv2.Sobel(grayscale, cv2.CV_64F, 1, 0, ksize=3)
    grad_y = cv2.Sobel(grayscale, cv2.CV_64F, 0, 1, ksize=3)

    # Convert the (x, y) gradient pair to polar form; only the magnitude
    # is kept, the angle is discarded.
    magnitude, _angle = cv2.cartToPolar(grad_x, grad_y, angleInDegrees=True)

    # Rescale into the displayable 0-255 uint8 range.
    magnitude = cv2.normalize(magnitude, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)

    return magnitude, {}


def robert(image):
    """Detect edges with the 2x2 Roberts cross operator."""
    grayscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # The two Roberts cross kernels respond to the two diagonal directions.
    kernel_x = np.array([[-1, 0], [0, 1]])
    kernel_y = np.array([[0, -1], [1, 0]])

    # Filter along each diagonal, then merge the responses bitwise.
    response_x = cv2.filter2D(grayscale, -1, kernel_x)
    response_y = cv2.filter2D(grayscale, -1, kernel_y)
    merged = cv2.bitwise_or(response_x, response_y)

    return merged, {}


# Dispatch table mapping an edge-detector name to its implementation.
EDGE = {
    'canny': canny,
    'sobel': sobel,
    'robert': robert
}


def edge(img, **params):
    """Dispatch to the edge detector named by ``params['edge_type']``.

    Remaining keyword params are forwarded to the detector; values that are
    strings of digits are converted to int first (they typically arrive as
    text from a request/query string).

    Raises KeyError if 'edge_type' is missing or names an unknown detector.
    """
    edge_type = params.pop('edge_type')
    print(f"edge type is: {edge_type}")
    # Convert numeric strings to ints. The isinstance guard fixes a bug in
    # the original, which called .isdigit() on every value and raised
    # AttributeError for values that were already ints.
    for key, value in params.items():
        if isinstance(value, str) and value.isdigit():
            params[key] = int(value)
    return EDGE[edge_type](img, **params)


# 轮廓检测
import cv2


def f_contours(img, thresh):
    """Find contours above a binary threshold and draw them onto *img*.

    Parameters:
        img: BGR image; modified in place (contours drawn in red, width 3).
        thresh: binarization threshold (int or numeric string).

    Returns the annotated image and a dict echoing the threshold used.
    """
    thresh = int(thresh)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _ret, binary = cv2.threshold(gray, thresh, 255, cv2.THRESH_BINARY)
    # cv2.findContours returns 3 values in OpenCV 3.x but only 2 in 4.x.
    # The last two are always (contours, hierarchy), so slice from the end
    # to stay compatible with both; the original 3-way unpack raised
    # ValueError under OpenCV 4.
    contours, _hierarchy = cv2.findContours(
        binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]
    cv2.drawContours(img, contours, -1, (0, 0, 255), 3)
    return img, {'thresh': thresh}


# Total contour area
def Con_area(image, thresh=127):
    """Return the summed area of all contours found in *image*.

    The image is binarized at *thresh* (default 127, matching the
    commented-out code in the original) before contour extraction.

    Fixes the original, which called f_contours(image) without its required
    `thresh` argument (TypeError) and iterated over its (img, dict) return
    value as if it were a contour list.
    """
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    _ret, binary = cv2.threshold(gray, thresh, 255, cv2.THRESH_BINARY)
    # [-2] keeps the contour list under both OpenCV 3.x (3 return values)
    # and 4.x (2 return values).
    contours = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2]
    return sum(cv2.contourArea(c) for c in contours)

# Total contour perimeter
def Con_perimeter(image, thresh=127):
    """Return the summed perimeter of all contours found in *image*.

    The image is binarized at *thresh* (default 127) before contour
    extraction. Fixes the original, which called f_contours(image) without
    its required `thresh` argument and iterated over its (img, dict)
    return value as if it were a contour list.
    """
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    _ret, binary = cv2.threshold(gray, thresh, 255, cv2.THRESH_BINARY)
    # [-2] keeps the contour list under both OpenCV 3.x and 4.x.
    contours = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2]
    # closed=True: every contour is measured as a closed curve; pass False
    # for open curves.
    return sum(cv2.arcLength(c, True) for c in contours)

# Contour approximation
def approximate_contours(image, isclosed, thresh=127):
    """Approximate the largest contour in *image* with cv2.approxPolyDP.

    Parameters:
        image: BGR input image.
        isclosed: True if the contour is a closed curve, False if open.
        thresh: binarization threshold (default 127).

    Returns the approximated polygon (ndarray of points).
    Raises ValueError if no contour is found.

    Fixes the original, which called f_contours(image) without its required
    `thresh` argument and sorted its (img, dict) return value as if it
    were a contour list.
    """
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    _ret, binary = cv2.threshold(gray, thresh, 255, cv2.THRESH_BINARY)
    # [-2] keeps the contour list under both OpenCV 3.x and 4.x.
    contours = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2]
    # Only the largest contour (by area) is approximated, as in the
    # original's sort-descending-take-first.
    largest = max(contours, key=cv2.contourArea)
    perimeter = cv2.arcLength(largest, isclosed)
    # Tolerance of 1% of the perimeter, as in the original.
    epsilon = 0.01 * perimeter
    return cv2.approxPolyDP(largest, epsilon, isclosed)


'''特征检测'''
# SIFT detection
def sift(img):
    """Detect SIFT keypoints and draw them onto *img* (modified in place).

    Returns the annotated image and an empty params dict.
    """
    # SIFT moved from the xfeatures2d contrib module into the core API as
    # cv2.SIFT_create() in OpenCV 4.4; try the modern name first and fall
    # back to the contrib location for older builds.
    try:
        detector = cv2.SIFT_create()
    except AttributeError:
        detector = cv2.xfeatures2d.SIFT_create()
    kp, des = detector.detectAndCompute(img, None)  # des is the descriptor matrix
    cv2.drawKeypoints(img, kp, img, color=(255, 0, 255))  # draw keypoints as colored circles
    # return img, {'kp': kp, 'des': des}
    return img, {}

# SURF detection
def surf(img):
    """Detect SURF keypoints and draw them onto *img* (modified in place).

    NOTE(review): SURF lives in the xfeatures2d contrib module and is
    patented — this presumably requires an opencv-contrib build with the
    nonfree algorithms enabled; confirm against the deployed OpenCV build.
    """
    initsurf = cv2.xfeatures2d.SURF_create(7000)  # Hessian threshold for SURF
    kp, des = initsurf.detectAndCompute(img, None)  # find keypoints and descriptors
    cv2.drawKeypoints(img, kp, img, color=(255, 0, 255))  # draw the keypoints
    # return img, {'kp': kp, 'des': des}
    return img, {}

# Dispatch table mapping a feature-detector name to its implementation.
FEATURES = {
    'sift': sift,
    'surf': surf
}


def feature(img, **params):
    """Dispatch to the feature detector named by ``params['feature_type']``.

    Remaining keyword params are forwarded to the detector; values that are
    strings of digits are converted to int first (they typically arrive as
    text from a request/query string).

    Raises KeyError if 'feature_type' is missing or names an unknown
    detector.
    """
    feature_type = params.pop('feature_type')
    print(f"feature type is: {feature_type}")
    # Convert numeric strings to ints. The isinstance guard fixes a bug in
    # the original, which called .isdigit() on every value and raised
    # AttributeError for values that were already ints.
    for key, value in params.items():
        if isinstance(value, str) and value.isdigit():
            params[key] = int(value)
    return FEATURES[feature_type](img, **params)

'''畸变矫正'''
# Rectification mapping
def remap(img, cameraMatrix, distCoeffs, R, newCameraMatrix, size, m1type, interpolation: int):
    """Undistort and rectify *img* using a precomputed camera model.

    The first seven parameters mirror cv2.initUndistortRectifyMap exactly.
    `interpolation` is an OpenCV interpolation flag such as
    cv2.INTER_LINEAR — an int; the original `str` annotation looked wrong,
    since cv2.remap takes integer flags.

    Returns the remapped image and an empty params dict.
    """
    map1, map2 = cv2.initUndistortRectifyMap(cameraMatrix, distCoeffs, R, newCameraMatrix, size, m1type)
    img = cv2.remap(img, map1, map2, interpolation)
    return img, {}


# Perspective transform
def perspective(img, src_points=None, size=(632, 400)):
    """Warp a quadrilateral region of *img* to a fronto-parallel rectangle.

    Parameters:
        img: source BGR image.
        src_points: four pixel coordinates of the source quad, in the order
            top-left, top-right, bottom-left, bottom-right. Defaults to the
            region hard-coded in the original implementation.
        size: (width, height) of the output image; default matches the
            original 632x400.

    Returns the warped image and an empty params dict.
    """
    width, height = size
    if src_points is None:
        # The quad coordinates hard-coded in the original version.
        src_points = [[194, 1476], [1024, 192], [865, 2017], [1784, 692]]
    pts_src = np.float32(src_points)
    # Destination corners in the same TL, TR, BL, BR order.
    pts_dst = np.float32([[0, 0], [width, 0], [0, height], [width, height]])
    transform = cv2.getPerspectiveTransform(pts_src, pts_dst)
    warped = cv2.warpPerspective(img, transform, (width, height))
    return warped, {}


'''像素统计'''
def calcHist(img):
    """Plot the channel-0 intensity histogram of *img* and return the plot
    rendered as an image array."""
    histogram = cv2.calcHist([img], [0], None, [256], [0, 256])
    plt.plot(histogram)
    # Render the matplotlib figure into an in-memory PNG, then decode it
    # back into a numpy array so the caller receives pixels, not a figure.
    buffer = io.BytesIO()
    plt.savefig(buffer, format='png')
    buffer.seek(0)
    rendered = np.array(Image.open(buffer))
    plt.close()
    return rendered, {}


'''OCR文字识别和提取'''
from paddleocr import PaddleOCR, draw_ocr
import os


def OCR(img):
    """Run PaddleOCR on *img*: print every detected text line (box, text,
    confidence), then return a copy of the image annotated with the
    detection boxes, recognized text, and scores.

    NOTE(review): a fresh PaddleOCR instance is created on every call,
    which re-loads (and on first use downloads) the model — consider
    caching the instance.
    """
    # Work around duplicate-OpenMP-runtime aborts common with Paddle.
    os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
    ocr = PaddleOCR(use_angle_cls=True, lang="ch")  # need to run only once to download and load model into memory
    result = ocr.ocr(img, cls=True)

    # Text extraction: print coordinates, content, and confidence per line.
    for idx in range(len(result)):
        res = result[idx]
        for line in res:
            print(line)

    # Text recognition: render the detections onto the image.
    # Lift PIL's decompression-bomb limit for very large inputs.
    Image.MAX_IMAGE_PIXELS = None
    result = result[0]
    # draw_ocr expects RGB, while the input is BGR.
    image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    boxes = [line[0] for line in result]
    txts = [line[1][0] for line in result]
    scores = [line[1][1] for line in result]
    im_show = draw_ocr(image, boxes, txts, scores, font_path='static/simfang.ttf')
    # im_show = draw_ocr(image, boxes, txts, scores)
    ocr_img = np.array(Image.fromarray(im_show))
    return ocr_img, {}
