import os
import cv2
import numpy as np
import torch
from ultralytics import YOLO
from PIL import Image, ImageTk
from src.screenshot.screenshot_py32 import screenshot_py32
from src.bean.anchor_point import anchor_point

# Colour palette for drawing detection boxes. Values are passed straight to
# cv2.rectangle/cv2.putText, which expect BGR channel order; the class index
# is taken modulo len(COLORS) so any number of classes cycles the palette.
COLORS = [
    (0, 0, 255),
    (255, 0, 0),
    (0, 255, 0),
    (255, 255, 0),
    (0, 255, 255),
    (255, 0, 255),
    (192, 192, 192),
    (128, 128, 128),
    (128, 0, 0),
    (128, 128, 0),
    (0, 128, 0),
]


class window_way:
    """Screenshot-and-detect helper for a single application window.

    Captures the window identified by ``title`` via ``screenshot_py32``,
    runs a YOLO model over the frame, and exposes the results either as
    ``anchor_point`` coordinate objects or as Tk ``PhotoImage``s (with or
    without detection boxes drawn) for display in a Tkinter UI.
    """

    def __init__(self, **kwargs):
        """Load the YOLO model and remember display settings.

        Keyword Args:
            title: Window title passed to ``screenshot_py32``.
            model_file_name: Model file name, resolved under ``<cwd>/models``.
            device_name: ``"cpu"`` forces CPU; anything else requests
                ``cuda:0`` (falls back to CPU when CUDA is unavailable).
            img_width / img_height: Size of the Tk display images;
                default 768x432. An explicit ``None`` also means default.
        """
        self.title = kwargs.get("title")
        self.model_file_name = kwargs.get("model_file_name")
        self.device_name = kwargs.get("device_name")
        width = kwargs.get("img_width")
        height = kwargs.get("img_height")
        self.img_width = 768 if width is None else width
        self.img_height = 432 if height is None else height
        model_file_path = os.path.join(os.getcwd(), "models", self.model_file_name)
        # Fall back to CPU when CUDA was requested but is not available,
        # instead of crashing later when the model is moved/run on cuda:0.
        if self.device_name == "cpu" or not torch.cuda.is_available():
            self.device = torch.device("cpu")
        else:
            self.device = torch.device("cuda:0")
        self.model = YOLO(model_file_path).to(self.device)

    def capture_image(self):
        """Grab a screenshot of the target window as a contiguous ndarray."""
        return np.ascontiguousarray(screenshot_py32(self.title))

    def predict_anchor(self, image):
        """Run YOLO inference on *image*; return the raw results collection."""
        return self.model.predict(image)

    def _to_photo_image(self, image):
        """Convert a BGR ndarray to a resized Tk PhotoImage for display."""
        rgba = cv2.cvtColor(image, cv2.COLOR_BGR2RGBA)
        pil_image = Image.fromarray(rgba)
        pil_image = pil_image.resize((self.img_width, self.img_height), Image.LANCZOS)
        return ImageTk.PhotoImage(image=pil_image)

    def predict_nobox_image(self, image):
        """Return a Tk image of the raw frame, without detection boxes."""
        return self._to_photo_image(image)

    def predict_image(self, image, results):
        """Return a Tk image of the frame with detection boxes drawn on it."""
        return self._to_photo_image(self.draw_box(image, results))

    def jiexi_result(self, results):
        """Parse YOLO results into a flat list of anchor_point objects.

        Each detection becomes ``anchor_point(name=<class name>, x, y, x1, y1)``
        with integer pixel coordinates. Results with no detections are skipped.
        """
        ret = []
        for result in results:
            boxes = result.boxes
            # Convert the class tensor once per result (was re-converted
            # for every single box inside the loop).
            classes = boxes.cls.tolist()
            if not classes:
                continue
            for cls_value, coords in zip(classes, boxes.xyxy.tolist()):
                x0, y0, x1, y1 = (int(v) for v in coords)
                title = result.names[int(cls_value)]
                ret.append(anchor_point(name=title, x=x0, y=y0, x1=x1, y1=y1))
        return ret

    def draw_box(self, image, results):
        """Draw one labelled rectangle per detection onto *image* (in place).

        The colour cycles through COLORS by class index; the class name from
        ``result.names`` is drawn just above the box. Returns the same image.
        """
        for result in results:
            boxes = result.boxes
            # Hoisted out of the inner loop: one tensor->list conversion
            # per result instead of one per box.
            classes = boxes.cls.tolist()
            if not classes:
                continue
            for cls_value, coords in zip(classes, boxes.xyxy.tolist()):
                x0, y0, x1, y1 = (int(v) for v in coords)
                type_index = int(cls_value)
                color = COLORS[type_index % len(COLORS)]
                cv2.rectangle(image, (x0, y0), (x1, y1), color, 2)
                text = result.names[type_index]
                # Clamp the label position so it stays on-screen when the
                # box touches the top or left edge of the frame.
                cv2.putText(image, text, (max(0, x0), max(0, y0 - 5)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 1)
        return image
