import cv2
import torch
import os
from modelscope.models import Model
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import random
import shutil
from Serial import flowing_send
#from typing import Tuple

# Load the YOLOv5 detector from a local checkout and build the second-stage
# classification pipeline (ModelScope) used on each detected crop.
model = torch.hub.load('./yolov5/', 'yolov5l6', source='local')  # other sizes: 'yolov5m', 'yolov5s', ...
second_model = Model.from_pretrained("./new_model")  # second-stage classifier weights
image_classification = pipeline(Tasks.image_classification, model=second_model)

# Empty a directory (files, symlinks and sub-directories) without removing it.
def clear_directory(directory="./data_image"):
    """Delete everything inside *directory*, keeping the directory itself.

    A missing directory is treated as already empty (the original code raised
    FileNotFoundError here). Individual deletion failures are reported and
    skipped so one bad entry does not abort the whole sweep.
    """
    if not os.path.isdir(directory):  # nothing to clear; os.listdir would raise
        return
    for filename in os.listdir(directory):
        file_path = os.path.join(directory, filename)
        try:
            if os.path.isfile(file_path) or os.path.islink(file_path):
                os.unlink(file_path)
            elif os.path.isdir(file_path):
                shutil.rmtree(file_path)
        except Exception as e:
            print(f'Failed to delete {file_path}. Reason: {e}')

def generate_dir(base_dir:str='./data_image') -> str:
	"""Ensure *base_dir* and its 'cut' sub-folder exist, then empty 'cut'.

	The 'cut' folder holds the per-object crops of the current frame, so it
	is wiped on every call. Returns the path of the 'cut' folder.
	"""
	cut_dir = os.path.join(base_dir, 'cut')
	# makedirs creates base_dir and cut_dir in one call; exist_ok replaces the
	# racy exists()-then-create pattern of the original code.
	os.makedirs(cut_dir, exist_ok=True)
	# Drop crops left over from the previous frame.
	clear_directory(cut_dir)
	return cut_dir

import asyncio
# Serializes serial-port access: only one flowing_send transfer at a time.
flowing_send_lock = asyncio.Lock()
async def cut_images(image:Image.Image,base_dir:str = './data_image'):
	"""Detect objects in *image* with YOLOv5 and return their boxes.

	Also (re)creates and empties ``base_dir/cut``, which later receives the
	per-object crops. Returns a numpy array with one detection row per
	object — [x1, y1, x2, y2, conf, cls], as unpacked by predict_images.
	"""
	image_array = np.array(image)
	generate_dir(base_dir)  # prepare an empty crop folder for this frame
	# Run the blocking detector in the default executor so the event loop
	# stays responsive. get_event_loop() is deprecated inside coroutines.
	loop = asyncio.get_running_loop()
	results = await loop.run_in_executor(None, model, image_array)
	return results.xyxy[0].cpu().numpy()

async def predict_images(box,index:int,image:Image.Image,base_dir:str='./data_image'):
	"""Classify one detected object and draw its box + label on a copy of *image*.

	Parameters:
		box: one YOLOv5 result row [x1, y1, x2, y2, conf, cls] (numpy floats).
		index: running index used to name the saved crop file.
		image: full source frame; it is copied, never modified in place.
		base_dir: root folder whose 'cut' sub-folder receives the crop.

	Returns ``(flag, (x1, y1, x2, y2), annotated_image)`` where *flag* is the
	numeric garbage-category code sent to the sorter hardware.
	"""
	loop = asyncio.get_running_loop()  # get_event_loop() is deprecated inside coroutines
	image_copy = image.copy()
	cut_dir = os.path.join(base_dir, 'cut')
	x1, y1, x2, y2, _, _ = box  # coordinates; confidence and class id are unused
	print(f"物体坐标: ({x1}, {y1}), ({x2}, {y2})")
	slice_image = image.crop((x1, y1, x2, y2))  # crop the detected region
	slice_image.save(os.path.join(cut_dir, 'slice-{}.jpg'.format(index)))
	# Run the blocking second-stage classifier off the event loop.
	result = await loop.run_in_executor(None, image_classification, slice_image)
	label = result.get("labels")[0].split("-")[0]  # coarse category before the '-'
	print(f"物体类别: {label}")
	# Category -> hardware bin code; anything unrecognized falls into bin 3.
	flag = {"可回收物": 0, "有害垃圾": 1, "厨余垃圾": 2}.get(label, 3)
	# Annotate the copy with a randomly-colored bounding box and the label.
	draw = ImageDraw.Draw(image_copy)
	draw.rectangle([x1, y1, x2, y2], outline=(random.randint(100,255),random.randint(100,255),random.randint(100,255)), width=4)
	font = ImageFont.truetype("./zh-cn.ttf", 20)  # CJK-capable font for the label
	draw.text((x1, y1), label, fill="red", font=font)
	result_point = tuple(i.astype(float) for i in (x1, y1, x2, y2))  # numpy scalars -> plain floats
	return flag,result_point, image_copy

from nicegui import ui

@ui.page("/")
def index():
	"""Landing page: app title, a start button that navigates to /capture, and a looping promo video."""
	with ui.row():
		with ui.column():
			ui.markdown("# **智能垃圾分类**")
			ui.button("启动").style("width: 100%; height: 100px; font-size: 40px;").on_click(lambda: ui.navigate.to("/capture"))
		ui.video("./assets/123.mp4",autoplay=True,loop=True).style("width: 700px; height: auto;")

@ui.page("/capture")
def capture_page():  # renamed from 'index', which shadowed the "/" handler
	"""Capture page: take a webcam photo, detect + classify objects, drive the sorter."""
	with ui.row():
		async def process():
			"""Grab one frame, run detection, then classify each box and queue a serial send."""
			x.set_source("./assets/loading.gif")  # show a spinner while working
			cap = cv2.VideoCapture(0)  # default webcam
			ok, frame = cap.read()
			cap.release()  # release the camera before any early return
			if not ok:  # camera missing/busy: frame would be None and cvtColor would crash
				print("Failed to read a frame from the camera")
				return
			image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))  # OpenCV BGR -> PIL RGB
			boxes = await cut_images(image)
			x.set_source(image)
			loop = asyncio.get_running_loop()  # get_event_loop() is deprecated inside coroutines
			print(boxes.shape)
			for i, box in enumerate(boxes):
				# predict_images draws on a copy of the latest annotated image,
				# so boxes accumulate in the preview as the loop progresses.
				target, xy, image = await predict_images(box, i, image)
				x.set_source(image)
				print(xy)
				# Fire-and-forget; sends are serialized by flowing_send_lock.
				asyncio.create_task(run_flowing_send(loop, xy, target))
		with ui.column():
			ui.markdown("# **智能垃圾分类**")
			ui.button("拍照",on_click=process).style("width: 100%; height: 100px; font-size: 50px;")
		x=ui.interactive_image().style("width: 700px; height: auto;")

async def run_flowing_send(loop, xy, target):
    """Send one (xy, target) command over serial, serialized by flowing_send_lock.

    flowing_send is blocking, so it runs in *loop*'s default executor; the
    lock guarantees only one serial transfer is in flight at a time.
    """
    async with flowing_send_lock:
        await loop.run_in_executor(None, flowing_send, xy, target)

async def long_running_function(xy, target):
    """Demo coroutine: pretend to do five seconds of work, then report completion."""
    simulated_work_seconds = 5
    await asyncio.sleep(simulated_work_seconds)
    print(f"Long running function completed with xy: {xy}")


# NOTE(review): NiceGUI docs recommend guarding ui.run with
# `if __name__ in {"__main__", "__mp_main__"}` (native mode spawns a helper
# process) — confirm before packaging.
ui.run(native=True)  # start the UI in a native desktop window; this call blocks
