import os
import sys
import cv2
from utils.tool import *
from module.detector import Detector
import torch
from pathlib import Path
from Camrea import Carmera
import numpy as np

# Make the directory containing this script importable, then keep ROOT
# as a path relative to the current working directory for later joins.
FILE = Path(__file__).resolve()
ROOT = FILE.parent  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # expose local modules to the import system
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative form


class FastestDet:
    """CPU-only FastestDet inference wrapper.

    Loads the detector weights and the class-name list once at construction;
    `run` performs inference on a single BGR image and draws the detections
    on it in place.
    """

    def __init__(self):
        self.weights = ROOT / 'weights/weight_AP05:0.961837_150-epoch.pth'
        self.device = torch.device("cpu")
        self.cfg = LoadYaml('./configs/fruit.yaml')
        self.thresh = 0.65  # confidence threshold for keeping detections
        self.model = Detector(self.cfg.category_num, True).to(self.device)
        self.model.load_state_dict(torch.load(self.weights, map_location=self.device))
        # Sets the module in eval mode (fixed typo: was "eval node").
        self.model.eval()
        # Load label names once here instead of re-reading the file on every
        # call to run() — the list is invariant across frames.
        with open(self.cfg.names, 'r') as f:
            self.label_names = [line.strip() for line in f]

    def run(self, ori_img):
        """Detect objects in `ori_img` (assumed BGR, HxWx3 — TODO confirm).

        Draws rectangles and category labels on `ori_img` in place.

        Returns:
            (boxes, scores, classes, ori_img) where boxes are
            [x1, y1, x2, y2] lists in original-image pixel coordinates,
            scores are objectness scores, classes are integer class indices.
        """
        res_img = cv2.resize(ori_img, (self.cfg.input_width, self.cfg.input_height),
                             interpolation=cv2.INTER_LINEAR)
        img = res_img.reshape(1, self.cfg.input_height, self.cfg.input_width, 3)
        img = torch.from_numpy(img.transpose(0, 3, 1, 2))  # NHWC -> NCHW
        img = img.to(self.device).float() / 255.0
        # Model inference
        preds = self.model(img)
        # Post-process the feature maps into per-detection rows
        # [x1, y1, x2, y2, score, class]; coordinates are scaled by W/H
        # below, so they are presumably normalized to [0, 1] — verify
        # against handle_preds.
        output = handle_preds(preds, self.device, self.thresh)
        H, W, _ = ori_img.shape
        boxes, scores, classes = [], [], []
        for box in output[0]:
            box = box.tolist()
            obj_score = box[4]
            cls_idx = int(box[5])
            category = self.label_names[cls_idx]
            # Map the predicted coordinates back to original-image pixels.
            x1, y1 = int(box[0] * W), int(box[1] * H)
            x2, y2 = int(box[2] * W), int(box[3] * H)
            cv2.rectangle(ori_img, (x1, y1), (x2, y2), (255, 255, 0), 2)
            cv2.putText(ori_img, category, (x1, y1 - 25), 0, 0.7, (0, 255, 0), 1)
            boxes.append([x1, y1, x2, y2])
            scores.append(obj_score)
            classes.append(cls_idx)
        return boxes, scores, classes, ori_img

if __name__ == '__main__':
    # Continuously grab aligned frames from the camera and run detection
    # on each color frame (results are currently discarded).
    camera = Carmera()
    detector = FastestDet()
    while True:
        color_frame, aligned_depth_frame = camera.get_frame()
        color_intrin_part = camera.get_color_intrin_part(color_frame)
        frame = np.asanyarray(color_frame.get_data())
        detector.run(frame)



