from datetime import datetime
import math
import cv2
from PIL import Image
import numpy
from ultralytics import YOLO
import torch
# Use the GPU if one is available, otherwise fall back to the CPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('device:', device)

# Load the detection weights from the working directory.
# NOTE(review): 'v2.pt' is a relative path — presumably resolved against
# the process CWD; confirm deployment layout.
model = YOLO('v2.pt')
# Move the model's parameters to the selected compute device.
model.to(device)


def yl(sereen, classes=None):
    """Run YOLO detection on a full screenshot image.

    Args:
        sereen: dict with a 'path' key giving the image file path.
            (Parameter name kept as-is for caller compatibility;
            presumably a misspelling of "screen".)
        classes: optional list of class ids to restrict detection to.
            ``None`` (the default) behaves like the previous ``[]``
            default.

    Returns:
        list[dict]: one entry per detected box; 'center_x'/'center_y'
        and 'x'/'y' hold the box center, 'x_offset'/'y_offset' hold the
        box width and height (all in pixels of the source image).
    """
    global model
    # Avoid the shared-mutable-default pitfall while still passing the
    # same value ([]) to predict() that the old default did.
    if classes is None:
        classes = []
    # Context manager closes the underlying file handle deterministically;
    # predict() runs synchronously inside the block.
    with Image.open(sereen['path']) as image:
        results = model.predict(source=image, save=True,
                                save_txt=True, classes=classes, conf=0.25)
    result = []
    for r in results:
        for xywh in r.boxes.xywh:
            x = int(xywh[0])
            y = int(xywh[1])
            result.append({
                'isFind': True,
                'center_x': x,
                'center_y': y,
                'x': x,
                'y': y,
                'x_offset': int(xywh[2]),
                'y_offset': int(xywh[3])
            })
    print(f'{str(datetime.now())}--yolo：{str(result)}')
    return result


def yl_rect(sereen, classes=None, start_x=0, end_x=1, start_y=0):
    """Run YOLO detection on a rectangular crop of a screenshot.

    Args:
        sereen: dict with a 'path' key giving the image file path.
        classes: optional list of class ids to restrict detection to.
            ``None`` (the default) behaves like the previous ``[]``
            default.
        start_x: left edge of the crop as a fraction of image width.
        end_x: right edge of the crop as a fraction of image width.
        start_y: top edge of the crop as a fraction of image height.

    Returns:
        list[dict]: one entry per detected box, with centers translated
        back into full-image coordinates; 'x_offset'/'y_offset' are the
        box width and height.
    """
    global model
    if classes is None:
        classes = []  # preserve the old default behavior

    temp = cv2.imread(sereen['path'])
    tempHeight, tempWidth = temp.shape[0:2]
    # Scale factor from source-image width to the model's 640px width.
    model_aspectratio = 640 / tempWidth
    aspectratio = tempWidth / tempHeight

    # Convert fractional bounds to pixel coordinates.
    crop_start_x = int(start_x * tempWidth)
    crop_end_x = int(end_x * tempWidth)
    crop_start_y = int(start_y * tempHeight)

    # The crop height is derived from its width so the crop keeps the
    # source image's aspect ratio (start_y fixes only the top edge).
    cropped_w = int(crop_end_x - crop_start_x)
    cropped_h = int(cropped_w / aspectratio)
    crop_end_y = int(crop_start_y + cropped_h)

    # NumPy slicing clamps out-of-range bounds rather than raising.
    cropped = temp[crop_start_y:crop_end_y, crop_start_x:crop_end_x]

    # imgsz scales the crop by the same ratio a full-width image would get.
    results = model.predict(source=cropped, save=True, imgsz=int(
        cropped_w * model_aspectratio), save_txt=True, classes=classes, conf=0.25)
    result = []
    for r in results:
        for xywh in r.boxes.xywh:
            x = int(xywh[0])
            y = int(xywh[1])
            result.append({
                'isFind': True,
                # Translate crop-local centers back to full-image coords.
                'center_x': crop_start_x + x,
                'center_y': crop_start_y + y,
                'x': crop_start_x + x,
                'y': crop_start_y + y,
                'x_offset': int(xywh[2]),
                'y_offset': int(xywh[3])
            })
    print(f'{str(datetime.now())}--yolo：{str(result)}')
    return result


def yl_center(sereen, classes=None, rect=260):
    """Run YOLO detection on a crop around the screenshot's center.

    Args:
        sereen: dict with a 'path' key giving the image file path.
        classes: optional list of class ids to restrict detection to.
            ``None`` (the default) behaves like the previous ``[]``
            default.
        rect: width in pixels of the centered crop window; its height
            is scaled by the image aspect ratio.

    Returns:
        list[dict]: one entry per detected box, with centers translated
        back into full-image coordinates. Includes 'center_x'/'center_y'
        aliases for consistency with yl()/yl_rect(); 'x_offset'/
        'y_offset' are the box width and height.
    """
    global model
    if classes is None:
        classes = []  # preserve the old default behavior

    temp = cv2.imread(sereen['path'])
    tempHeight, tempWidth = temp.shape[0:2]
    # Scale factor from source-image width to the model's 640px width.
    model_aspectratio = 640 / tempWidth

    aspectratio = tempWidth / tempHeight
    # Half-extents of the crop; height is shrunk by the aspect ratio.
    size_x = rect / 2
    size_y = math.floor(size_x / aspectratio)

    center_x = tempWidth // 2
    center_y = tempHeight // 2
    # NOTE(review): the whole window is shifted 50px up — presumably to
    # skip a UI element near the center; confirm before changing.
    crop_start_y = int(center_y - size_y) - 50
    crop_end_y = int(center_y + size_y) - 50
    crop_start_x = int(center_x - size_x)
    crop_end_x = int(center_x + size_x)

    cropped = temp[crop_start_y:crop_end_y, crop_start_x:crop_end_x]

    results = model.predict(source=cropped, save=True, imgsz=int(rect * model_aspectratio),
                            save_txt=True, classes=classes, conf=0.25)
    result = []
    for r in results:
        for xywh in r.boxes.xywh:
            x = crop_start_x + int(xywh[0])
            y = crop_start_y + int(xywh[1])
            result.append({
                'isFind': True,
                # center_x/center_y added to match yl() and yl_rect();
                # the original 'x'/'y' keys are kept unchanged.
                'center_x': x,
                'center_y': y,
                'x': x,
                'y': y,
                'x_offset': int(xywh[2]),
                'y_offset': int(xywh[3])
            })
    print(f'{str(datetime.now())}--yolo：{str(result)}')
    return result


# The script-vs-import branches printed the identical banner, so a single
# unconditional print is behaviorally equivalent on both paths.
print("我是yolo文件")
