# -*- coding: utf-8 -*-
# @Time : 2023/4/7 13:55
# @Author : 陈鹏飞
# @Email : 2578925789@qq.com
# @File : detect
# @Description : 

import os
import cv2
import torch
from ultralytics import YOLO
from tqdm import tqdm
import json
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from tempfile import NamedTemporaryFile

# Detection class names; list index must match the model's 0-based class ids
# (COCO category ids used elsewhere in this file are these indices + 1).
CLASSES = ['face', 'hand', 'cigarette', 'cellphone']

def readImg(model, img_path, save_path):
    """Run YOLOv8 inference on every image in a folder and save annotated copies.

    Args:
        model: loaded ultralytics ``YOLO`` model (callable on a BGR ndarray).
        img_path: directory containing the input images.
        save_path: directory for the annotated output images (created if missing).
    """
    # Create the output directory once, up front (original re-checked per image).
    os.makedirs(save_path, exist_ok=True)
    for imgname in os.listdir(img_path):
        imgfile = os.path.join(img_path, imgname)
        img = cv2.imread(imgfile)
        if img is None:
            # Skip unreadable/non-image entries instead of crashing in model(img).
            continue
        # Run YOLOv8 inference on the frame
        results = model(img)
        # Visualize the results on the frame
        annotated_frame = results[0].plot()
        cv2.imwrite(os.path.join(save_path, imgname), annotated_frame)


def readVideo(model, video_path, save_path):
    """Run YOLOv8 inference frame-by-frame over a video.

    If ``save_path`` is truthy, annotated frames are written to an XVID ``.avi``
    (named after the input video) in that directory; otherwise frames are shown
    in a window until 'q' is pressed.

    Args:
        model: loaded ultralytics ``YOLO`` model.
        video_path: path to the input video file.
        save_path: output directory, or falsy to display instead of saving.
    """
    cap = cv2.VideoCapture(video_path)

    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS)

    # Only build the writer when we are actually saving: the original created
    # the output dir/writer unconditionally, which crashed for falsy save_path
    # before the display branch could ever run.
    writer = None
    if save_path:
        os.makedirs(save_path, exist_ok=True)
        stem = os.path.splitext(os.path.basename(video_path))[0]
        video_vis_path = os.path.join(save_path, stem + ".avi")
        fourcc = cv2.VideoWriter_fourcc('X', 'V', 'I', 'D')
        writer = cv2.VideoWriter(video_vis_path, fourcc, fps, (width, height))

    # Loop through the video frames
    while cap.isOpened():
        # Read a frame from the video
        success, frame = cap.read()
        if not success:
            # End of the video (or read failure) — stop.
            break

        # Run YOLOv8 inference on the frame
        results = model(frame)

        # Visualize the results on the frame
        annotated_frame = results[0].plot()
        if writer is not None:
            writer.write(annotated_frame)
            # Debug snapshot: overwritten every frame with the latest result.
            cv2.imwrite(os.path.join(save_path, "vis.jpg"), annotated_frame)
        else:
            # Display the annotated frame
            cv2.imshow("YOLOv8 Inference", annotated_frame)
            # Break the loop if 'q' is pressed
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break

    # Release the video capture object and close the display window
    cap.release()
    if writer is not None:
        writer.release()
    else:
        cv2.destroyAllWindows()

def readJson(model, save_path):
    """Run inference over the validation set listed in a COCO-style json and
    save annotated visualizations.

    Paths to the validation images/json are hard-coded below.

    Args:
        model: loaded ultralytics ``YOLO`` model.
        save_path: directory for the annotated output images (created if missing).
    """
    # Validation set location (hard-coded).
    imgs_dir = '/home/chenpengfei/dataset/DSM_Dataset_class4_20220211_fukang/val/image'
    json_dir = '/home/chenpengfei/dataset/DSM_Dataset_class4_20220211_fukang/val/val.json'
    # Create the output directory once (original re-checked on every image).
    os.makedirs(save_path, exist_ok=True)
    with open(json_dir, 'r') as f:
        new_dict = json.load(f)
    print(new_dict.keys())
    for tt in tqdm(new_dict['images']):
        filename = tt['file_name']
        img = cv2.imread(os.path.join(imgs_dir, filename))
        if img is None:
            # Listed in the json but unreadable on disk — skip instead of crashing.
            continue
        # Run YOLOv8 inference on the frame
        results = model(img)
        # Visualize the results on the frame. (The original also iterated the
        # boxes to build per-box label strings but never used them — removed.)
        annotated_frame = results[0].plot()
        cv2.imwrite(os.path.join(save_path, filename), annotated_frame)


def txt2json(txt_file, root):
    """Convert a comma-separated annotation txt into a COCO-format dict.

    Each line has the form ``file_name,x1,y1,x2,y2,label[,x1,y1,x2,y2,label...]``
    where ``label`` is a 0-based class index (shifted to 1-based COCO ids here).

    Args:
        txt_file: path to the annotation txt.
        root: directory containing the images named in the txt (read to get
            their height/width).

    Returns:
        dict with COCO-style ``images``, ``annotations`` and ``categories`` keys.

    Raises:
        FileNotFoundError: if an image named in the txt cannot be read.
    """
    data = {}
    data['images'] = []
    data['annotations'] = []
    data['categories'] = [{"id": 1, "name": "face"}, {"id": 2, "name": "hand"}, {"id": 3, "name": "cigarette"},
                          {"id": 4, "name": "cellphone"}]

    img_id = 1  # running COCO image id
    ann_id = 1  # running COCO annotation id
    with open(txt_file, 'r', encoding='utf8') as f:
        for line in tqdm(f.readlines()):
            fields = line.strip().split(',')
            name = fields[0]

            img = cv2.imread(os.path.join(root, name))
            if img is None:
                # Original crashed with an opaque AttributeError on img.shape.
                raise FileNotFoundError("cannot read image: " + os.path.join(root, name))
            img_h, img_w, _ = img.shape
            data['images'].append({"height": img_h, "width": img_w, "id": img_id, "file_name": name})

            # Each box occupies 5 fields: x1, y1, x2, y2, label.
            n_boxes = len(fields) // 5
            for i in range(n_boxes):
                ind = (i + 1) * 5
                x1 = int(float(fields[ind - 4]))
                y1 = int(float(fields[ind - 3]))
                x2 = int(float(fields[ind - 2]))
                y2 = int(float(fields[ind - 1]))
                # Box width/height get their own names — the original reused
                # h/w and silently shadowed the image dimensions.
                bw = x2 - x1
                bh = y2 - y1
                label = int(fields[ind]) + 1  # 0-based txt label -> 1-based COCO id

                data['annotations'].append({"id": ann_id, "image_id": img_id, "category_id": label,
                                            "bbox": [x1, y1, bw, bh], "iscrowd": 0,
                                            "area": bw * bh,
                                            "segmentation": [[x1, y1, x2, y2]]})
                ann_id += 1

            img_id += 1
    return data


def comput_mAP(json_pr, json_gt):
    """Report COCO bbox mAP for a prediction json against a ground-truth json.

    Prints the overall summary first, then re-runs the evaluation restricted
    to each category id 1..4 in turn.

    Args:
        json_pr: path to (or list of) COCO-format detection results.
        json_gt: path to the COCO-format ground-truth annotation file.
    """
    gt = COCO(json_gt)
    dt = gt.loadRes(json_pr)
    evaluator = COCOeval(gt, dt, 'bbox')

    def _run_eval():
        # evaluate -> accumulate -> summarize is the standard COCOeval sequence.
        evaluator.evaluate()
        evaluator.accumulate()
        evaluator.summarize()

    # All categories together.
    _run_eval()

    # Per-category breakdown.
    for cat_id in (1, 2, 3, 4):
        print('-------------------{}-------------------\n'.format(str(cat_id)))
        evaluator.params.catIds = [cat_id]
        _run_eval()

def evaluate(model, pred_dir, save_path):
    """Run the model over the validation set, dump detections as a COCO results
    json, compute mAP, and print the mean confidence per class.

    Args:
        model: loaded ultralytics ``YOLO`` model.
        pred_dir: output path for the COCO-format predictions json.
        save_path: directory for (currently disabled) visualization output;
            created if missing.
    """
    imgs_dir = '/home/chenpengfei/dataset/DSM_Dataset_class4_20220211_fukang/val/image'
    json_dir = '/home/chenpengfei/dataset/DSM_Dataset_class4_20220211_fukang/val/val.json'
    # Per-class confidence sums and detection counts, indexed like CLASSES.
    # (Replaces four copy-pasted counter pairs from the original.)
    conf_sums = [0.0] * len(CLASSES)
    det_counts = [0] * len(CLASSES)
    anns = []
    os.makedirs(save_path, exist_ok=True)
    with open(json_dir, 'r') as f:
        new_dict = json.load(f)
    print(new_dict.keys())
    for tt in tqdm(new_dict['images']):
        filename = tt['file_name']
        image_id = tt['id']
        orig = cv2.imread(os.path.join(imgs_dir, filename))
        if orig is None:
            # Listed in the json but unreadable on disk — skip it.
            continue

        # Run YOLOv8 inference on the frame
        results = model(orig)
        boxes = results[0].boxes
        if boxes is None:
            continue

        for d in reversed(boxes):
            c, conf = int(d.cls), float(d.conf)
            box = d.xyxy.squeeze().tolist()

            conf_sums[c] += conf
            det_counts[c] += 1

            x1, y1, x2, y2 = int(box[0]), int(box[1]), int(box[2]), int(box[3])
            cv2.rectangle(orig, (x1, y1), (x2, y2), (0, 255, 0), 2)
            title = CLASSES[c] + "%.2f" % conf
            cv2.putText(orig, title, (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2, cv2.LINE_AA)

            # COCO detection-results entry; category ids are 1-based.
            anns.append({'image_id': image_id,
                         'category_id': c + 1,
                         'bbox': [x1, y1, x2 - x1, y2 - y1],
                         'score': conf})
        # img_path_new = os.path.join(save_path, filename)
        # cv2.imwrite(img_path_new, orig)

    with open(pred_dir, 'w', encoding='utf-8') as f:
        json.dump(anns, f, indent=4)
    comput_mAP(pred_dir, json_dir)

    # Mean confidence per class; guard against classes with zero detections
    # (the original divided unconditionally and could raise ZeroDivisionError).
    for cls_name, total, count in zip(CLASSES, conf_sums, det_counts):
        avg = total / count if count else 0.0
        print(cls_name + "_confs_ave: " + str(avg))

if __name__ == "__main__":
    mode = 5  # 0:导出纯权重pt 1:推理图片 2:推理验证集 3:获取coco指标 4:已有推理json，只计算ap 5:视频可视化
    baseDir = "./runs/detect/train35"
    img_path = "/home/chenpengfei/dataset/Temp/image/image_20230227"
    save_path = baseDir + "/vis/"

    video_path = "/home/chenpengfei/dataset/testData/dianke_video/02_65_6502_04_65022023121619303600018331517001.mp4"

    # Load the YOLOv8 model
    weights = baseDir + '/weights/best.pt'
    model = YOLO(weights)
    if mode == 0:
        model.save_dict(weights, fuse=False)  # 导出纯权重
    elif mode == 1:
        readImg(model, img_path, save_path)
    elif mode == 2:
        readJson(model, save_path)
    elif mode == 3:
        pred_dir = baseDir + "/cocoval/pred_pt_result.json"
        if not os.path.exists(baseDir + "/cocoval/"):
            os.makedirs(baseDir + "/cocoval/")
        evaluate(model, pred_dir, save_path)
    elif mode == 4:
        pred_dir = "./runs/detect/train/val/predictions_new.json"
        json_dir = '/home/chenpengfei/dataset/DSM_Dataset_class4_20220211_fukang/val/val.json'
        comput_mAP(pred_dir, json_dir)
    elif mode == 5:
        readVideo(model, video_path, save_path)




