from selenium import webdriver
from selenium.webdriver.common.by import By
from PIL import Image
import pytesseract
import cv2

import io

from PIL import Image


def del_noise(img):
    """Remove thin horizontal interference lines from a captcha image.

    The captcha is assumed to be black/white background plus one dominant
    foreground colour: the most frequent colour that is neither pure black
    nor pure white is treated as the line/text colour.  Any pixel of that
    colour whose vertical neighbours are both fully transparent is cleared,
    which erases 1-pixel-high horizontal noise lines while leaving thicker
    glyph strokes intact.

    :param img: raw image bytes of the captcha (PNG with alpha expected).
    :return: None.  The cleaned image is written to '2222.png'.
    """
    # Decode the raw bytes; force RGBA so getpixel always yields 4-tuples
    # (the original code crashed on plain-RGB inputs).
    image = Image.open(io.BytesIO(img)).convert("RGBA")
    width, height = image.size

    # Count occurrences of every colour that is neither pure black nor
    # pure white -- the most frequent one is assumed to be the
    # interference/foreground colour.
    count_map = {}
    for y in range(height):
        for x in range(width):
            r, g, b, a = image.getpixel((x, y))
            if (r, g, b) != (0, 0, 0) and (r, g, b) != (255, 255, 255):
                count_map[(r, g, b)] = count_map.get((r, g, b), 0) + 1

    # Guard: an all-black/white image has nothing to clean (the original
    # raised ValueError from max() here).
    if count_map:
        max_key = max(count_map, key=count_map.get)
        # Clear isolated 1-pixel-high runs of the dominant colour.  Explicit
        # bounds (skip first/last row) replace the original bare try/except,
        # which silently swallowed the edge-of-image IndexError.
        for y in range(1, height - 1):
            for x in range(width):
                r, g, b, a = image.getpixel((x, y))
                if (r, g, b) == max_key:
                    above = image.getpixel((x, y - 1))
                    below = image.getpixel((x, y + 1))
                    # (0, 0, 0, 0) is fully transparent black -- the
                    # captcha's "empty" pixel value.
                    if above == (0, 0, 0, 0) and below == (0, 0, 0, 0):
                        image.putpixel((x, y), (0, 0, 0, 0))

    # Persist the cleaned image for the downstream OCR step.
    image.save('2222.png')

def img_shot():
    """Open the register page, screenshot it, and crop out the captcha.

    Saves the full-page screenshot as 'total.png' and the cropped captcha
    region as 'crop.png'.
    """
    driver = webdriver.Chrome()
    driver.maximize_window()
    driver.get('http://localhost:8088/jpress/user/register')
    driver.save_screenshot("total.png")

    captcha = driver.find_element(by=By.ID, value="captchaimg")
    rect = captcha.rect
    print(rect)

    # Crop box order is (left, upper, right, lower) in page coordinates.
    full_page = Image.open("../testcases/total.png")
    left, upper = rect['x'], rect['y']
    crop_box = (left, upper, left + rect['width'], upper + rect['height'])
    full_page.crop(crop_box).save("crop.png")


def verify():
    """Upscale the binarised captcha and run Tesseract OCR on it."""
    image = cv2.imread('../testcases/crop2.png')
    # Double the size -- larger glyphs usually improve Tesseract accuracy.
    image = cv2.resize(image, None, fx=2, fy=2, interpolation=cv2.INTER_AREA)
    cv2.imwrite("../testcases/crop_o.png", image)
    try:
        # Terminate the Tesseract job if it runs longer than 2 seconds.
        text = pytesseract.image_to_string('crop_o.png', lang='eng', timeout=2)
        print(text)
    except RuntimeError:
        # OCR timed out -- treat as "no result".
        pass

def process():
    """Binarise the cropped captcha for OCR.

    Reads '../testcases/crop.png' in grayscale mode, applies a simple
    global threshold (pixels <= 127 become background 0, pixels above
    become foreground 255) and writes the result to
    '../testcases/crop2.png' for the `verify` OCR step.

    Reference: http://pythonabc.org/index.php/python-pdf/tesseract/114-3-11-5-opencv

    NOTE(review): the original experimented with several alternatives --
    median/Gaussian blur, adaptive mean/Gaussian thresholding, OTSU
    binarisation -- but none of those results ever reached the output
    (the display loop was commented out), so the dead computations were
    removed.  See the reference above to re-evaluate them.
    """
    # Second argument 0 -> load as a single-channel grayscale image,
    # which cv2.threshold requires.
    img = cv2.imread('../testcases/crop.png', 0)

    # cv2.threshold returns (retval, image); index 1 is the binarised image.
    simple_threshold = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)[1]

    cv2.imwrite("../testcases/crop2.png", simple_threshold)


def ddd_decoder(img):
    """Recognise captcha text with ddddocr.

    The original computed the result and discarded it (the prints were
    commented out); it is now returned to the caller.

    :param img: path to the captcha image file.
    :return: the recognised text as a string.
    """
    import ddddocr  # local import: ddddocr is only needed by this decoder
    ocr = ddddocr.DdddOcr(show_ad=False)
    with open(img, 'rb') as f:
        img_bytes = f.read()
    # classification() performs the actual OCR on the raw image bytes.
    return ocr.classification(img_bytes)


if __name__ == "__main__":
    # img = open('../testcases/crop.png', 'rb').read()
    # del_noise(img)
    # # test01()
    # # # test02()
    # # process()
    # test02()
    ddd_decoder('img.png')