
from paddlelite.lite import *
import paddlelite     #引入paddlelite框架
import cv2  #引入opencv框架
import numpy as np
import sys
import time
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
import time
import config as cfg
from control import *
from DataHandler import DataHandler
import time
from threading import Thread
from flask1.app import Web
# Module-level singleton instances shared by the detection loop and the web thread.
plat = Control()       # platform controller — presumably drives the sorting hardware; verify in control.py
dh = DataHandler()     # maps raw detection labels to garbage categories (see dataHandle usage below)
web = Web()            # Flask web dashboard (flask1.app)
fd = FullDetect()      # bin-fullness detector — name comes from `from control import *`; TODO confirm

class Predictor():
    """Camera-driven garbage detector built on a Paddle-Lite model.

    Owns the video-capture device and the Paddle-Lite predictor, and runs
    the detect -> vote -> actuate loop of the sorting platform.  Relies on
    the module-level globals ``label_time``, ``j`` and ``start_time`` set
    up in the ``__main__`` block, and on the shared singletons ``plat``,
    ``dh``, ``web`` and ``fd``.
    """

    def __init__(self, model_dir):
        """Load the model from *model_dir* and open the default camera."""
        self.predictor = self.Load(model_dir)
        self.cap = cv2.VideoCapture(0)  # device 0 = default camera
        # Property id 3 is CAP_PROP_FRAME_WIDTH, 4 is CAP_PROP_FRAME_HEIGHT
        # (the original comment had them swapped).
        self.cap.set(3, 1920)
        self.cap.set(4, 1080)

    def Load(self, model_dir):
        """Create and return a Paddle-Lite predictor for the model file.

        :param model_dir: path to the .nb model file (cfg.model_path).
        :return: a Paddle-Lite predictor object.
        """
        print("开始加载模型....")
        config = MobileConfig()
        config.set_model_from_file(model_dir)
        predictor = create_paddle_predictor(config)
        print("模型加载完毕....")
        return predictor

    def Capture(self):
        """Grab one frame from the camera (saving to disk is disabled)."""
        ret, frame = self.cap.read()
        # NOTE(review): `ret` is not checked, so this success message prints
        # even when the read failed — confirm before relying on it.
        print("摄像头打开正常")
        # cv2.imwrite('./img.jpg', frame)

    def Show(self):
        """Continuously display raw camera frames in a window."""
        while True:
            ret, frame = self.cap.read()
            if not ret:
                print("摄像头异常")
                continue  # fix: frame is None on a failed read; imshow would crash
            cv2.imshow("frame", frame)
            cv2.waitKey(1)

    def ShowWithModel(self):
        """Main loop: detect objects, display them, and drive the platform.

        Each iteration grabs a frame, runs detection, lets ``draw`` vote on
        the dominant label, then routes the result to the platform
        controller and the web dashboard.  Runs forever.
        """
        global label_time  # round counter incremented inside draw()
        while True:
            ret, frame = self.cap.read()
            if not ret:
                print("摄像头异常")
                continue  # fix: frame is None on a failed read; shape access would crash
            h, w = frame.shape[:2]
            res, img = self.Predict(frame)

            # img: annotated crop; reslabel: majority label ("None" until a
            # voting round completes); center_x/y: majority box centre;
            # force_clear: 1 when the idle timeout expired; count: boxes
            # above threshold in this frame.
            img, reslabel, center_x, center_y, force_clear, num_detected_objects = self.draw(img, res)

            print(label_time)
            print("同种类类型有" + str(num_detected_objects))

            if force_clear:
                # Idle too long: force-classify as "Other" and purge.
                print("开始强制清除")
                plat.TurnToGarbage("Other")

            cv2.imshow("frame", img)

            # Map the voted label to a garbage category and actuate.
            garbageType = dh.dataHandle(reslabel)
            print(reslabel, garbageType)
            plat.TurnToGarbage(garbageType)
            if garbageType == 'Recyclable':
                # Recyclables get a compress/release cycle.
                plat.Compress()
                time.sleep(5)
                plat.Uncompress()

            try:
                if not reslabel == "None":
                    # Push the result and bin-fullness status to the dashboard.
                    web.NewData(reslabel, num_detected_objects)
                    dat = fd.GetFullStatus()
                    print(dat)
                    web.UpateFullStatus(dat)
                    time.sleep(1)
            except KeyboardInterrupt:
                # Manual interrupt: shut the web app down but keep looping.
                web.Close()
            cv2.waitKey(1)

    def draw(self, image, bbox):
        """Draw detection boxes on *image* and vote on the dominant label.

        *bbox* is a flat list where every 6 consecutive values describe one
        detection: (class id, score, x1, y1, x2, y2).  Accepted detections
        are accumulated across calls through the module-level counter ``j``;
        when 11 detections have been collected (j counts 0..10) the most
        frequent label and centre are reported and the round restarts.

        :return: (image, majority_label_or_"None", center_x, center_y,
                  force_clear_flag, num_detected_objects)
        """
        global label_time   # completed-round counter shown by the caller
        global j            # cross-call vote counter (reset each round)
        global start_time   # timestamp of the last accepted detection
        label_true = "None"   # majority label, set only when a round completes
        label = "None"
        label_counts = {}       # label -> occurrences in the current round
        max_center_count = {}   # (cx, cy) -> occurrences in the current round

        center_x = 0
        center_y = 0
        is_there_object = 0  # 1 => caller should force a purge cycle

        # Force a purge when nothing was detected for this many seconds.
        timeout = 30
        elapsed_time = time.time() - start_time

        print("闲置时间：" + str(elapsed_time))

        if elapsed_time >= timeout:
            is_there_object = 1
            start_time = time.time()

        num_detected_objects = 0  # boxes above threshold in this frame

        for i in range(0, len(bbox), 6):
            info = bbox[i:i + 6]  # one detection record
            label = cfg.label_map[info[0]]
            (text_width, text_height) = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)[0]
            background = cfg.color_map[info[0]]
            score = info[1]

            # Skip low-confidence detections.
            if float(score) < cfg.threshold:
                continue
            start_time = time.time()  # an accepted detection resets the idle timer
            x1 = int(info[2])  # box corners
            x2 = int(info[4])
            y1 = int(info[3])
            y2 = int(info[5])
            center_x = (x1 + x2) / 2
            center_y = (y1 + y2) / 2
            print(f'bbox:{x1, y1, x2, y2},label:{label},scores:{score}')

            image = cv2.rectangle(image, (x1, y1), (x2, y2),
                                  background, 2)
            if y1 < text_height:  # label would leave the image: draw it inside the box
                image = cv2.rectangle(image, (x1, y1), (x1 + text_width, y1 + text_height), color=background,
                                      thickness=-1)
                image = cv2.putText(image, label, (x1, y1 + text_height), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                    (0, 0, 0), 1)
            else:  # draw the label just above the box
                image = cv2.rectangle(image, (x1, y1 - text_height), (x1 + text_width, y1), color=background,
                                      thickness=-1)
                image = cv2.putText(image, label, (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                    (0, 0, 0), 1)

            # Tally this label and this box centre for the current round.
            label_counts[label] = label_counts.get(label, 0) + 1
            center_key = (center_x, center_y)
            max_center_count[center_key] = max_center_count.get(center_key, 0) + 1

            num_detected_objects += 1
            # When j reaches 10, eleven detections have been collected
            # (the original comment said "j==4 / 5 times", which no longer
            # matched the code): report the vote and start a new round.
            if j == 10:
                j = 0
                max_label = max(label_counts, key=label_counts.get)
                label_true = max_label
                print(f'平均出现最多标签为: {label_true}')

                max_center = max(max_center_count, key=max_center_count.get)
                center_x, center_y = max_center
                print(f'中心坐标为: ({center_x}, {center_y})')

                label_time = label_time + 1
                label_counts = {}
                max_center_count = {}
            else:
                j += 1

        return image, label_true, center_x, center_y, is_there_object, num_detected_objects

    def Predict(self, image):
        """Crop/resize *image*, run the detector, return (detections, crop).

        :param image: full camera frame (numpy HWC array).
        :return: (flat detection list from output tensor 0, resized crop).
        """
        h, w = image.shape[:2]
        # Fixed region of interest inside the 1920x1080 frame — presumably
        # the chute area; TODO confirm against the physical setup.
        x1, y1 = 590, 150
        x2, y2 = x1 + 1080, y1 + 530

        image = image[y1:y2, x1:x2]

        base_img = cv2.resize(image, cfg.target_size)
        h, w = base_img.shape[:2]

        img = self.Preprocess(base_img)
        # Input slot layout used here: 0 = im_shape, 1 = image, 2 = scale_factor.
        img_tensor = self.predictor.get_input(1)
        image_data = np.array(img).flatten().tolist()
        img_tensor.resize([1, 3, cfg.target_size[0], cfg.target_size[1]])
        img_tensor.set_float_data(image_data)

        scale_tensor = self.predictor.get_input(2)
        scale_tensor.resize([1, 2])
        scale_tensor.set_float_data(
            np.array([w / cfg.target_size[0], h / cfg.target_size[1]]).astype(
                'float32').reshape(1, 2).flatten().tolist())

        shape_tensor = self.predictor.get_input(0)
        shape_tensor.resize([1, 2])
        shape_tensor.set_float_data(
            np.array([w, h]).astype('float32').reshape([1, 2]).flatten().tolist())

        self.predictor.run()
        # output 0 carries the detection boxes; output 1 (kept indices) is unused.
        output_tensor_0 = self.predictor.get_output(0)
        res_0 = output_tensor_0.float_data()

        return res_0, base_img

    @staticmethod
    def Preprocess(img):
        """Normalise an HWC uint8 image into an NCHW float32 tensor.

        Scales to [0, 1], subtracts cfg.mean, divides by cfg.std per
        channel, and prepends a batch dimension.
        """
        img = np.array(img).astype('float32').transpose((2, 0, 1))  # HWC to CHW
        mean = np.array(cfg.mean)
        std = np.array(cfg.std)
        img /= 255.0
        img -= mean[:, np.newaxis, np.newaxis]
        img /= std[:, np.newaxis, np.newaxis]
        img = img[np.newaxis, :]  # add batch dimension -> NCHW
        return img


if __name__ == '__main__':

    # Counter of completed voting rounds; read and mutated by Predictor.draw.
    label_time = 0

    # Serve the web dashboard in the background.
    web_thread = Thread(target=web.Run)
    web_thread.start()

    fd.Start()

    # Fix: use a distinct name so the web-thread handle is not overwritten.
    rec_thread = Thread(target=fd.RecData)
    rec_thread.start()

    model_path = cfg.model_path
    start_time = time.time()  # idle timer, reset inside Predictor.draw
    p = Predictor(model_path)

    j = 0  # per-detection vote counter used by Predictor.draw

    # Blocks forever; the sleep below is unreachable in normal operation.
    p.ShowWithModel()
    time.sleep(1)
