"""
2023-10-10
editor_by: sy
"""
import requests
from loguru import logger
import base64
from io import BytesIO
import ddddocr  # pip install ddddocr 或者python3.11版本 pip install ddddocr-py311
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import cv2

# Default browser-like request headers for use with `requests`.
# NOTE(review): `headers` (and the `requests` import) are not referenced in the
# visible code — presumably used by callers elsewhere; confirm before removing.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36",
}


class Slider:
    """Three alternative strategies for locating the gap of a slider captcha."""

    @staticmethod
    def get_slider_offset_method2(background_pic_path, slider_path, minus_gap=0):
        """
        Locate the slider gap via OpenCV template matching on edge maps.

        :param background_pic_path: path to the background image containing the gap
        :param slider_path: path to the slider (puzzle-piece) image
        :param minus_gap: initial gap between the slider and the left edge,
                          subtracted from the matched x offset
        :return: x offset (pixels) of the gap

        Notes:
            - A color image has three channels (BGR in OpenCV); a grayscale image
              is single-channel with values 0-255.
            - cv2.imread(path, flag): returns BGR (not RGB like PIL.Image.open);
              flag=0 reads 8-bit grayscale, flag=1 8-bit 3-channel, flag=-1 as-is.
            - cv2.Canny(img, minVal, maxVal, apertureSize=3): minVal/maxVal bound
              the hysteresis thresholds; lowering minVal yields more edges, a
              larger Sobel aperture keeps more detail.
            - cv2.matchTemplate(img_big, img_temp, method) match methods:
                TM_SQDIFF          squared difference; 0 is a perfect match
                TM_CCORR           cross correlation; larger is better
                TM_CCOEFF          correlation coefficient; 1 perfect, -1 worst
                TM_*_NORMED        normalized variants of the above
        """

        def image_edge_detection(img):
            # Canny edge map; thresholds 80/200 chosen empirically for captcha
            # images (lower minVal would detect more edges).
            return cv2.Canny(img, 80, 200)

        def operate_slider(slider_img):
            """
            Trim the uniform padding around the slider piece, then return its edge map.

            Outside the actual piece every pixel's three BGR components are equal
            (pure gray), so rows whose pixels are all single-valued bound the crop.
            :param slider_img: BGR slider image
            :return: grayscale Canny edge image of the cropped piece
            """
            height, width, _ = slider_img.shape
            min_y, max_y = 0, 0
            min_x, max_x = width, 0
            for y in range(height):  # scan top to bottom
                # per-pixel count of distinct channel values in this row
                col_item = [len(set(item)) for item in slider_img[y]]
                row_len = len(set(col_item))
                if row_len != 1:  # row contains non-gray (piece) pixels
                    if not min_y:
                        min_y = y
                    else:
                        max_y = y
                    for index, col in enumerate(col_item):
                        if col != 1:
                            if index > max_x:
                                max_x = index
                            if index < min_x:
                                min_x = index
                if min_y and row_len == 1:
                    # piece already started and this row is uniform again -> done
                    break
            # crop away the uniform area
            slider_img = slider_img[min_y:max_y, min_x:max_x]
            # grayscale conversion + edge extraction
            return image_edge_detection(cv2.cvtColor(slider_img, cv2.COLOR_BGR2GRAY))

        def template_match(slider_pic, background_pic):
            """
            Template-match the slider edge image against the background edge image.

            :param slider_pic: slider edge image (the template)
            :param background_pic: background edge image (the search area)
            :return: x coordinate of the best match's top-left corner
            """
            th, tw = slider_pic.shape[:2]
            result = cv2.matchTemplate(background_pic, slider_pic, cv2.TM_CCOEFF_NORMED)
            # max_loc is the top-left corner of the highest-score match
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
            left_top = max_loc
            right_bottom = (left_top[0] + tw, left_top[1] + th)
            # draw a red 2px box around the match (debug aid; harmless otherwise)
            cv2.rectangle(background_pic, left_top, right_bottom, (0, 0, 255), 2)
            # cv2.imshow("v", background_pic)
            # cv2.waitKey(0)
            return left_top[0]

        # 1. crop the slider piece and extract its edges
        edge_slider_img = operate_slider(cv2.imread(slider_path))
        # 2. read the background as grayscale and extract its edges
        edge_background_img = image_edge_detection(cv2.imread(background_pic_path, 0))
        # 3. colorize both so the debug rectangle can be drawn in red; can be
        #    skipped when the visual check is not needed
        edge_slider_img = cv2.cvtColor(edge_slider_img, cv2.COLOR_GRAY2RGB)
        edge_background_img = cv2.cvtColor(edge_background_img, cv2.COLOR_GRAY2RGB)
        x = template_match(edge_slider_img, edge_background_img)
        return x - minus_gap

    @staticmethod
    def get_slider_offset_method3(cut_pic_path, pic_path, threshold=60):
        """
        Locate the gap by pixel-wise comparison of the intact and the cut image.

        Two pixels count as equal when every RGB component differs by less than
        ``threshold``; the first x column (left to right) containing a differing
        pixel is returned, or None when no difference is found.
        :param cut_pic_path: path to the image with the gap cut out
        :param pic_path: path to the full (intact) image
        :param threshold: per-channel tolerance, default 60
        """
        pic_img = Image.open(pic_path)
        cut_img = Image.open(cut_pic_path)
        width, height = pic_img.size
        # hoist the PixelAccess objects out of the loops — previously .load()
        # was invoked once per pixel
        pic_px = pic_img.load()
        cut_px = cut_img.load()
        for x in range(40, width - 40):  # left to right, skipping the borders
            for y in range(5, height - 10):  # top to bottom
                pixel1 = pic_px[x, y]
                pixel2 = cut_px[x, y]
                if (abs(pixel1[0] - pixel2[0]) < threshold
                        and abs(pixel1[1] - pixel2[1]) < threshold
                        and abs(pixel1[2] - pixel2[2]) < threshold):
                    continue
                return x

    @staticmethod
    def get_slider_offset_method1(pic_content, cut_content, fixed_rgb_str='[245, 250, 255]', bulge_offset=10):
        """
        Return the gap offset by scanning for runs of one fixed RGB value.

        :param pic_content: background image bytes (image containing the gap)
        :param cut_content: slider-piece image bytes
        :param fixed_rgb_str: Python-literal string of the single fixed RGB value
                              that marks the gap area
        :param bulge_offset: width of the bulge; by default the bulge only points
                             up, left or down
        :return: x offset of the gap, or None when no match is found
        """
        import ast
        # literal_eval instead of eval: fixed_rgb_str is data, not code
        fixed_rgb = ast.literal_eval(fixed_rgb_str)
        _slider_image = Image.open(BytesIO(pic_content))
        cut_image = Image.open(BytesIO(cut_content))
        cut_image_width = cut_image.width
        array_img = np.array(_slider_image)
        height, width, _ = array_img.shape
        for y in range(height - 40):  # top to bottom
            for x in range(50, width - 40):  # left to right
                # 39 consecutive horizontal pixels all equal to fixed_rgb
                if np.all(array_img[y:y + 1, x:x + 39] == fixed_rgb):
                    list_side = [array_img[y, x]]
                    for side in range(1, 39):
                        list_side.append(array_img[y + side][x + side])
                    array_side = np.array(list_side)
                    # 39 consecutive diagonal pixels all equal to fixed_rgb
                    if np.all(array_side == fixed_rgb):
                        if cut_image_width > 40:  # bulge points left -> subtract it
                            return x - bulge_offset
                        return x
        return


class DdddocrI:
    """Thin wrappers around module-level ddddocr engines (slide_ocr / char_ocr / click_ocr)."""

    @staticmethod
    def draw_click_img(content, xy_list):
        """
        Annotate detected character boxes on the image and OCR each box.

        :param content: image bytes
        :param xy_list: list of (x1, y1, x2, y2) boxes from detection
        :return: list of recognized characters, one per box
        """
        # font used to write the recognized character next to its box
        font = ImageFont.truetype("./msyhl.ttc", 20)
        img = Image.open(BytesIO(content))
        draw = ImageDraw.Draw(img)
        words = []
        for x1, y1, x2, y2 in xy_list:
            # outline the box in red
            draw.line([(x1, y1), (x1, y2), (x2, y2), (x2, y1), (x1, y1)], width=1, fill="red")
            # crop the single character out of the image
            corp = img.crop((x1, y1, x2, y2))
            img_byte = BytesIO()
            corp.save(img_byte, 'png')
            # OCR the single character
            # NOTE(review): char_ocr is not defined in this file — presumably a
            # module-level ddddocr.DdddOcr() created elsewhere; confirm.
            word = char_ocr.classification(img_byte.getvalue())
            words.append(word)
            # write the label above the box when it sits near the bottom edge
            y = y1 - 30 if y2 > 300 else y2
            draw.text((int((x1 + x2) / 2), y), word, font=font, fill="red")
        # debug preview; safe to comment out
        img.show()
        return words

    @staticmethod
    def click_identify(content, crop_size=None):
        """
        Object-detection identification for click captchas.

        :param content: image bytes
        :param crop_size: optional (left, upper, right, lower) crop applied first
        :return: dict mapping recognized character -> its (x1, y1, x2, y2) box
        """
        img = Image.open(BytesIO(content))
        if crop_size:
            img = img.crop(crop_size)
            img_byte = BytesIO()
            img.save(img_byte, 'png')
            content = img_byte.getvalue()
        # NOTE(review): click_ocr is not defined in this file — confirm it is a
        # module-level ddddocr.DdddOcr(det=True) created elsewhere.
        xy_list = click_ocr.detection(content)
        words = DdddocrI.draw_click_img(content, xy_list)
        return dict(zip(words, xy_list))

    # backward-compatible alias for the original misspelled name
    clcik_identify = click_identify

    @staticmethod
    def ddddocr_identify(img_bytes, _type='char'):
        """
        Dispatch to the matching ddddocr engine.

        :param img_bytes: image bytes, or a 2-item sequence for the slide types
        :param _type: 'char' for plain character OCR, 'slide' for slider
                      matching, 'slide_bidui' for gap comparison, 'click' for
                      click-captcha detection
        :raises ValueError: on an unknown ``_type`` (previously this fell through
                            and raised NameError on the unset result variable)
        """
        if _type == 'char':
            # NOTE(review): char_ocr is not defined in this file — confirm.
            _code = char_ocr.classification(img_bytes)
        elif _type == 'slide':  # (target piece, background)
            # with a clean target piece, simple_target may be used; usually jpg/bmp
            _code = slide_ocr.slide_match(img_bytes[0], img_bytes[1], simple_target=True)['target'][0]
        elif _type == 'slide_bidui':  # (background with gap, background without gap)
            _code = slide_ocr.slide_comparison(img_bytes[0], img_bytes[1])['target'][0]
        elif _type == 'click':  # object detection / click captcha
            click_identify_result = DdddocrI.click_identify(img_bytes, crop_size=None)
            # reduce each box to its center point
            _code = {key: (int((xy[0] + xy[2]) / 2), int((xy[1] + xy[3]) / 2))
                     for key, xy in click_identify_result.items()}
        else:
            raise ValueError(f"unsupported _type: {_type!r}")
        logger.info(f">>>>>识别结果是{_code}")
        return _code


def img_identify(_type='char', img_path='./test.jpg'):
    """
    Demo driver: compute a slider offset and dump the images to disk.

    :param _type: only 'slide' does any work; other values return None
                  (previously any other value raised UnboundLocalError)
    :param img_path: unused in the visible code — kept for interface compatibility
    :return: [target_content, background_content] for 'slide', else None
    """
    img_content = None
    if _type == 'slide':
        # NOTE(review): target_content and background_content are not defined
        # anywhere in this file — presumably module globals fetched elsewhere;
        # confirm before running.
        img_content = [target_content, background_content]
        logger.info(Slider.get_slider_offset_method1(background_content, target_content))
        with open(r"./background_content.jpg", "wb") as f:
            f.write(background_content)
        with open(r"./target_content.jpg", "wb") as f:
            f.write(target_content)
    return img_content


# Module-level slider-matching engine (det/ocr disabled selects slide mode);
# read as a global by DdddocrI.ddddocr_identify.
slide_ocr = ddddocr.DdddOcr(det=False, ocr=False, show_ad=False)

if __name__ == "__main__":
    # Demo run only when executed as a script — previously this ran at import
    # time. NOTE(review): it depends on target_content/background_content,
    # which are not defined in this file; confirm before running.
    img_identify('slide')
