
# -*- coding:UTF-8 -*-
import sys
import warnings
import easyocr
from datetime import datetime
import logging
import cv2
import operator
import numpy
# Ratio between real screen coordinates and the enlarged working image:
# screenshots are upscaled by 1/screenScale before OCR, and all results are
# mapped back to screen space by multiplying with screenScale.
screenScale = 0.25

warnings.filterwarnings("ignore", category=UserWarning)

sys.dont_write_bytecode = True

logging.disable(logging.DEBUG)  # suppress DEBUG log output
logging.disable(logging.WARNING)  # suppress WARNING log output

# Shared OCR readers (simplified Chinese and English), constructed once at
# import time and reused by every function below.
reader = easyocr.Reader(['ch_sim'], gpu=True)
reader_en = easyocr.Reader(['en'], gpu=True)


def FQCR(screen, text='', x=0, x1=1, y=0, y1=1):
    """Locate *text* (simplified-Chinese OCR) inside a fractional region of a screenshot.

    Parameters:
        screen: dict with 'path' (screenshot file) and 'width'/'height'
            (logical window size).
        text: substring to look for in each recognized OCR line.
        x, x1, y, y1: region of interest as fractions of the window (0..1).

    Returns:
        dict with 'isFind' and, when found, the center ('x'/'y') and the
        bounding box ('startX'/'endX'/'startY'/'endY') in screen coordinates
        (scaled back down by screenScale).
    """
    temp = cv2.imread(screen['path'], cv2.IMREAD_GRAYSCALE)
    tempheight, tempwidth = temp.shape[0:2]
    # Upscale by 1/screenScale so OCR runs on a larger image.
    scaleTemp = cv2.resize(
        temp, (int(tempwidth / screenScale), int(tempheight / screenScale)))
    windowW = screen['width'] / screenScale
    windowH = screen['height'] / screenScale
    cx = int(x * windowW)
    cx1 = int(x1 * windowW)
    cy = int(y * windowH)
    cy1 = int(y1 * windowH)
    cropped = scaleTemp[cy:cy1, cx:cx1]
    # Save the cropped region for debugging. The original round-tripped the
    # array through numpy.frombuffer and then called
    # cv2.cvtColor(..., COLOR_BGRA2RGB) on a single-channel image — that
    # conversion requires 4 channels, so it raised cv2.error, and its return
    # value was discarded anyway. Writing the grayscale crop directly is the
    # intended behavior.
    cv2.imwrite('img_clicp.jpg', cropped,
                [int(cv2.IMWRITE_JPEG_QUALITY), 100])

    result = reader.readtext(cropped)
    data = {'isFind': False}
    for line in result:
        # line = (bbox, recognized_text, confidence); bbox corners are
        # [top-left, top-right, bottom-right, bottom-left].
        if text in line[1]:
            data['isFind'] = True
            a = line[0][0]
            b = line[0][1]
            c = line[0][2]
            tagHalfW = int((b[0] - a[0]) / 2)
            tagHalfH = int((c[1] - b[1]) / 2)
            # Map back to screen coordinates: add the crop offset, then
            # scale down by screenScale.
            data["x"] = int((a[0] + tagHalfW + cx) * screenScale)
            data["y"] = int((a[1] + tagHalfH + cy) * screenScale)
            data['startX'] = int((a[0] + cx) * screenScale)
            data['endX'] = int((b[0] + cx) * screenScale)
            data['startY'] = int((b[1] + cy) * screenScale)
            data['endY'] = int((c[1] + cy) * screenScale)
            break
    print(f'{str(datetime.now())}--识字：{str(text)},{str(data)}')
    return data


def FQCREN(screen, text='', x=0, x1=1, y=0, y1=1):
    """English-reader variant of FQCR: find *text* in a fractional region.

    *screen* supplies 'path', 'width' and 'height'; x/x1/y/y1 bound the
    region as fractions of the window. Returns a dict with 'isFind' plus,
    on a hit, center and bounding-box coordinates mapped back to screen
    space via screenScale.
    """
    gray = cv2.imread(screen['path'], cv2.IMREAD_GRAYSCALE)
    imgH, imgW = gray.shape[0:2]
    # Enlarge by 1/screenScale before running OCR.
    enlarged = cv2.resize(
        gray, (int(imgW / screenScale), int(imgH / screenScale)))
    winW = screen['width'] / screenScale
    winH = screen['height'] / screenScale
    left = int(x * winW)
    right = int(x1 * winW)
    top = int(y * winH)
    bottom = int(y1 * winH)
    region = enlarged[top:bottom, left:right]

    data = {'isFind': False}
    for box, found, *_ in reader_en.readtext(region):
        if text in found:
            data['isFind'] = True
            # box corners: top-left, top-right, bottom-right (bottom-left unused)
            tl, tr, br = box[0], box[1], box[2]
            halfW = int((tr[0] - tl[0]) / 2)
            halfH = int((br[1] - tr[1]) / 2)
            # Add the crop offset, then scale back to screen coordinates.
            data["x"] = int((tl[0] + halfW + left) * screenScale)
            data["y"] = int((tl[1] + halfH + top) * screenScale)
            data['startX'] = int((tl[0] + left) * screenScale)
            data['endX'] = int((tr[0] + left) * screenScale)
            data['startY'] = int((tr[1] + top) * screenScale)
            data['endY'] = int((br[1] + top) * screenScale)
            break
    print(f'{str(datetime.now())}--识字：{str(text)},{str(data)}')
    return data


def FQCRARR(screen, texts=None, x=0, x1=1, y=0, y1=1):
    """Locate the first OCR line containing any of *texts* (Chinese reader).

    Parameters:
        screen: dict with 'path', 'width', 'height' of the screenshot.
        texts: list of candidate substrings; the matched one is reported
            under data['text'].
        x, x1, y, y1: region of interest as fractions of the window (0..1).

    Returns:
        dict with 'isFind' plus, on a hit, the matched 'text', the center
        ('x'/'y') and the bounding box in screen coordinates.

    Fixes vs. the original: the mutable default argument (texts=[]) is
    replaced with None, and the search now stops at the first matching OCR
    line — previously `break` only exited the inner loop over *texts*, so a
    later line matching another candidate silently overwrote the first hit.
    """
    if texts is None:
        texts = []
    temp = cv2.imread(screen['path'], cv2.IMREAD_GRAYSCALE)
    tempheight, tempwidth = temp.shape[0:2]
    # Upscale by 1/screenScale so OCR runs on a larger image.
    scaleTemp = cv2.resize(
        temp, (int(tempwidth / screenScale), int(tempheight / screenScale)))
    windowW = screen['width'] / screenScale
    windowH = screen['height'] / screenScale
    cx = int(x * windowW)
    cx1 = int(x1 * windowW)
    cy = int(y * windowH)
    cy1 = int(y1 * windowH)
    cropped = scaleTemp[cy:cy1, cx:cx1]
    result = reader.readtext(cropped)
    data = {'isFind': False}
    for line in result:
        # First candidate substring contained in this OCR line, if any.
        match = next((t for t in texts if t in line[1]), None)
        if match is None:
            continue
        data['isFind'] = True
        # Bounding box corners: a=top-left, b=top-right, c=bottom-right.
        a = line[0][0]
        b = line[0][1]
        c = line[0][2]
        tagHalfW = int((b[0] - a[0]) / 2)
        tagHalfH = int((c[1] - b[1]) / 2)
        data["text"] = match
        data["x"] = int((a[0] + tagHalfW + cx) * screenScale)
        data["y"] = int((a[1] + tagHalfH + cy) * screenScale)
        data['startX'] = int((a[0] + cx) * screenScale)
        data['endX'] = int((b[0] + cx) * screenScale)
        data['startY'] = int((b[1] + cy) * screenScale)
        data['endY'] = int((c[1] + cy) * screenScale)
        break
    print(f'{str(datetime.now())}--识字：{str(texts)},{str(data)}')
    return data


def clipQCR(cut, text=''):
    """Locate *text* (Chinese reader) inside a pre-cropped image.

    Parameters:
        cut: dict with 'path' (image file) plus 'left'/'top', the crop's
            offset inside the full screen, used to map results back to
            screen coordinates.
        text: substring to look for in each recognized OCR line.

    Returns:
        dict with 'isFind' and, when found, center and bounding-box
        screen coordinates.

    Fix vs. the original: startX/endX added cut['top'] and startY/endY
    added cut['left'] — the offsets were swapped. The x-coordinates now
    use cut['left'] and the y-coordinates cut['top'], consistent with
    the center-point calculation in this same function.
    """
    temp = cv2.imread(cut['path'], cv2.IMREAD_GRAYSCALE)
    result = reader.readtext(temp)
    data = {'isFind': False}
    for line in result:
        if text in line[1]:
            data['isFind'] = True
            # Bounding box corners: a=top-left, b=top-right, c=bottom-right.
            a = line[0][0]
            b = line[0][1]
            c = line[0][2]
            tagHalfW = int((b[0] - a[0]) / 2)
            tagHalfH = int((c[1] - b[1]) / 2)
            data["x"] = int(a[0] + tagHalfW + cut['left'])
            data["y"] = int(a[1] + tagHalfH + cut['top'])
            data['startX'] = int(a[0] + cut['left'])
            data['endX'] = int(b[0] + cut['left'])
            data['startY'] = int(b[1] + cut['top'])
            data['endY'] = int(c[1] + cut['top'])
            break
    print(f'{str(datetime.now())}--识字：{str(text)},{str(data)}')
    return data


def clipQCRtext(cut):
    """Run Chinese OCR over the whole image at cut['path'] and return the
    raw easyocr result list (one (bbox, text, confidence) entry per line)."""
    image = cv2.imread(cut['path'], cv2.IMREAD_GRAYSCALE)
    lines = reader.readtext(image)
    print(f'{str(datetime.now())}--检测文字：{str(lines)}')
    return lines


def FQCREN_text(screen, x=0, x1=1, y=0, y1=1):
    """OCR (English reader) a fractional region and return the concatenated text.

    Parameters:
        screen: dict with 'path', 'width', 'height' of the screenshot.
        x, x1, y, y1: region as fractions of the window; passing 0 for
            x1 or y1 means "to the far edge".

    Returns:
        All recognized line texts joined into one string.

    Fixes vs. the original:
      * x1 == 0 / y1 == 0 assigned windowW/windowH, which was multiplied
        by windowW/windowH *again* when computing the crop bounds — the
        correct full-extent fraction is 1.
      * the debug save round-tripped the crop through numpy.frombuffer and
        cv2.cvtColor(COLOR_BGRA2RGB) on a single-channel image (which
        raises and whose result was discarded); the grayscale crop is now
        written directly.
    """
    temp = cv2.imread(screen['path'], cv2.IMREAD_GRAYSCALE)
    tempheight, tempwidth = temp.shape[0:2]
    # Upscale by 1/screenScale so OCR runs on a larger image.
    scaleTemp = cv2.resize(
        temp, (int(tempwidth / screenScale), int(tempheight / screenScale)))
    windowW = screen['width'] / screenScale
    windowH = screen['height'] / screenScale
    # 0 means "full extent" — as a fraction, that is 1.
    if x1 == 0:
        x1 = 1
    if y1 == 0:
        y1 = 1
    cx = int(x * windowW)
    cx1 = int(x1 * windowW)
    cy = int(y * windowH)
    cy1 = int(y1 * windowH)
    cropped = scaleTemp[cy:cy1, cx:cx1]
    # Debug save of the cropped region.
    cv2.imwrite('img_clicp222.jpg', cropped,
                [int(cv2.IMWRITE_JPEG_QUALITY), 100])

    result = reader_en.readtext(cropped)
    print(result)
    data = ''.join(line[1] for line in result)
    print(f'{str(datetime.now())}--检测计算：{str(data)}')
    return data


def FQCRtoNum(screen, x=0, x1=1, y=0, y1=1):
    """OCR a fractional region (Chinese reader) and pull out a number.

    Each recognized line has '+', '=', '-', '金' and '币' stripped; the
    last line whose remainder is all digits wins.

    Parameters:
        screen: dict with 'path', 'width', 'height' of the screenshot.
        x, x1, y, y1: region as fractions of the window; 0 for x1/y1
            means "to the far edge".

    Returns:
        The digit string, or the int 0 when no line qualifies.
        NOTE(review): the mixed str/int return is preserved for existing
        callers — normalize with int(...) when comparing.

    Fix vs. the original: x1 == 0 / y1 == 0 assigned windowW/windowH,
    which was then multiplied by windowW/windowH again when computing the
    crop bounds; only numpy's slice clamping made that work. The correct
    full-extent fraction is 1.
    """
    temp = cv2.imread(screen['path'], cv2.IMREAD_GRAYSCALE)
    tempheight, tempwidth = temp.shape[0:2]
    # Upscale by 1/screenScale so OCR runs on a larger image.
    scaleTemp = cv2.resize(
        temp, (int(tempwidth / screenScale), int(tempheight / screenScale)))
    windowW = screen['width'] / screenScale
    windowH = screen['height'] / screenScale
    if x1 == 0:
        x1 = 1
    if y1 == 0:
        y1 = 1
    cx = int(x * windowW)
    cx1 = int(x1 * windowW)
    cy = int(y * windowH)
    cy1 = int(y1 * windowH)
    cropped = scaleTemp[cy:cy1, cx:cx1]
    result = reader.readtext(cropped)
    print(result)
    data = 0
    # One-pass removal of all marker characters (replaces the chained
    # .replace() calls).
    markers = str.maketrans('', '', '+=-金币')
    for line in result:
        candidate = line[1].translate(markers)
        if candidate.isdigit():
            data = candidate
    print(f'{str(datetime.now())}--检测数字：{str(data)}')
    return data


# Announce the module. The original printed the identical message in both
# the `if __name__ == '__main__'` branch and the `else` branch, so the
# conditional collapses to a single unconditional statement.
print("我是QCR模块文件")
