#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time      :2024/12/11 14:39
# @Author    :hzy
# @FileName  :t.py
import os
import time

import cv2
import numpy as np
import uiautomator2 as u2
from paddleocr import PaddleOCR, draw_ocr
from PIL import Image


def get_curtime(time_format="%Y-%m-%d %H:%M:%S"):
    """Return the current local time rendered with *time_format* (strftime)."""
    return time.strftime(time_format, time.localtime())


# Lazily-built, shared OCR engine.  Constructing PaddleOCR downloads and loads
# the model, which is expensive — doing it once per process instead of once
# per call is a large, free speedup.
_OCR_ENGINE = None


def _get_ocr_engine():
    """Return the process-wide PaddleOCR instance, creating it on first use."""
    global _OCR_ENGINE
    if _OCR_ENGINE is None:
        _OCR_ENGINE = PaddleOCR(use_angle_cls=True, lang="ch")
    return _OCR_ENGINE


def ocr_img_text(image, saveimg=False, printResult=False):
    '''
    Run OCR text recognition on an image.

    :param image: image to recognize (e.g. a numpy array, as passed by callers
                  in this file; PaddleOCR also accepts a file path)
    :param saveimg: if True, save an annotated visualization of the result
                    under ./log/ (the directory is created if missing)
    :param printResult: if True, print every recognized detection
    :return: (result, img_name) — the raw PaddleOCR result (nested list of
             (box, (text, score)) detections) and the path the visualization
             is/would be saved to
    '''
    ocr = _get_ocr_engine()

    result = ocr.ocr(image, cls=True)
    if printResult:
        # result is a list of lines; each line holds (box, (text, score)) tuples.
        for line in result:
            for word in line:
                print(word)

    # Timestamped output path for the optional annotated image.
    img_name = "./log/ImgTextOCR-img-" + get_curtime("%Y%m%d%H%M%S") + ".jpg"
    if saveimg:
        # ./log may not exist yet — saving into a missing directory raises.
        os.makedirs(os.path.dirname(img_name), exist_ok=True)
        boxes = [detection[0] for line in result for detection in line]
        txts = [detection[1][0] for line in result for detection in line]
        scores = [detection[1][1] for line in result for detection in line]
        im_show = draw_ocr(image, boxes, txts, scores)
        im_show = Image.fromarray(im_show)
        im_show.save(img_name)

    return result, img_name


def ocr_get_txt_pos(image, text=""):
    '''
    Map recognized text to its on-screen position.

    :param image: image to run OCR on
    :param text: substring to look for; every detection whose text contains it
                 is returned.  When empty, the raw OCR result is returned as
                 the first element (NOTE: its shape then differs from the
                 filtered case — preserved for backward compatibility).
    :return: (find_txt_pos, find_txt) — center points [x, y] of the matching
             detections and their recognized texts
    '''

    result, img_path = ocr_img_text(image, saveimg=True)

    print("图片识别结果保存：", img_path)

    # Center of each detection box: x averaged from top-left/top-right corners,
    # y averaged from top-left/bottom-right corners.
    poslist = [[(detection[0][0][0] + detection[0][1][0]) / 2,
                (detection[0][0][1] + detection[0][2][1]) / 2]
               for line in result for detection in line]
    txtlist = [detection[1][0] for line in result for detection in line]

    find_txt_pos = []
    find_txt = []

    if text == "":
        # No filter: return the raw OCR result (legacy behavior).
        find_txt_pos = result
    else:
        for pos, txt in zip(poslist, txtlist):
            if text in txt:
                find_txt_pos.append(pos)
                find_txt.append(txt)

    print(find_txt_pos,find_txt)
    return find_txt_pos,find_txt


if __name__ == "__main__":
    # Grab a screenshot from the connected device and look up where the
    # "/240" text appears on screen.
    device = u2.connect('127.0.0.1:16384')
    screen = device.screenshot()
    frame = np.asarray(screen)
    # frame = cv2.imread("./home.jpg")
    positions = ocr_get_txt_pos(image=frame, text="/240")
