# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Run YOLOv5 detection inference on images, videos, directories, globs, YouTube, webcam, streams, etc.

Usage - sources:
    $ python detect.py --weights yolov5s.pt --source 0                               # webcam
                                                     img.jpg                         # image
                                                     vid.mp4                         # video
                                                     screen                          # screenshot
                                                     path/                           # directory
                                                     'path/*.jpg'                    # glob
                                                     'https://youtu.be/Zgi9g1ksQHc'  # YouTube
                                                     'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream

Usage - formats:
    $ python detect.py --weights yolov5s.pt                 # PyTorch
                                 yolov5s.torchscript        # TorchScript
                                 yolov5s.onnx               # ONNX Runtime or OpenCV DNN with --dnn
                                 yolov5s_openvino_model     # OpenVINO
                                 yolov5s.engine             # TensorRT
                                 yolov5s.mlmodel            # CoreML (macOS-only)
                                 yolov5s_saved_model        # TensorFlow SavedModel
                                 yolov5s.pb                 # TensorFlow GraphDef
                                 yolov5s.tflite             # TensorFlow Lite
                                 yolov5s_edgetpu.tflite     # TensorFlow Edge TPU
                                 yolov5s_paddle_model       # PaddlePaddle
"""

import argparse
import os
import platform
import sys
from pathlib import Path
#********************
# Libraries imported for the additional (expression-recognition) models
import imutils
import numpy as np
import numpy.matlib
import onnx
import csv
import time#这里我增建了一个time库
#********************
import torch

FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH so the local models/utils packages resolve
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative path from the current working dir

from models.common import DetectMultiBackend
from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
                           increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh)
from utils.plots import Annotator, colors, save_one_box
from utils.torch_utils import select_device, smart_inference_mode

'''Part1 Begin
********************************************************************************************************************************
'''

#from keras.preprocessing.image import img_to_array  # moved in newer Keras releases;
# the modern equivalent would be: from keras.utils import image_utils

# Keras section: TensorFlow/Keras are used for the second-stage expression classifier
import tensorflow as tf
import keras
#from keras.utils import image_utils  # importing this failed locally, so tf.keras.image_utils.img_to_array(roi) is called directly instead
from keras.models import load_model
# End of Keras section

print("当前根目录：",ROOT)

# PAD-score computation starts here; the block below documents the method and weights
'''
#根据论文中的计算方法，计算每一帧的表情预测值，生成新的矩阵
              & W_p       & W_a       & W_d
        困倦  & -0.12     & -1.16     & -0.60 
        疑惑  & 1.0525    & 1.3625    & -0.41
        中性  & 0         & 0         & 0 
        愉悦  & 2.77      & 1.21      & 1.42 
对于当前的表情概率，可以构建学生在不同维度下课堂状态值E_p、E_a、E_d 
用W_p、W_a、W_d 分别代表PAD模型中对应三个维度的权重
k是四种不同表情的标号，取值范围为[1,2,3,4]
P_k是当前表情的预测率值，avg是以某时刻的表情在三个维度状态值和的平均值作为对学生当前表情的特征值。
0.0054,0.0002,0.3451,0.6493
'''

# Per-dimension weight columns of the PAD (Pleasure-Arousal-Dominance) model,
# one row per expression class: [Tired, Confused, Neutral, Happy].
W_p = np.array([[-0.12],[1.0525],[0.0],[2.77]])
W_a = np.array([[-1.16],[1.3625],[0.0],[1.21]])
# NOTE(review): the table above lists W_d for "Confused" (疑惑) as -0.41, but the
# code uses 1.2100 — confirm which value is intended.
W_d = np.array([[-0.60],[1.2100],[0.0],[1.42]])

def get_avg(exps_matrix, pad_weights=None):
    """Collapse per-frame expression probabilities into one PAD score per frame.

    Each row of ``exps_matrix`` holds the four expression probabilities of one
    frame.  The original implementation computed E_p, E_a, E_d as dot products
    of the row with the module-level weight columns W_p/W_a/W_d and averaged
    them; that collapses algebraically into a single matrix product with the
    pre-averaged weight column, which also avoids the original per-row loop's
    assignment of a shape-(1,) array into a scalar slot (deprecated and an
    error in recent NumPy versions).

    Args:
        exps_matrix: (n, 4) array-like of per-frame expression probabilities.
        pad_weights: optional (4, 1) combined weight column.  Defaults to
            (W_p + W_a + W_d) / 3.0, reproducing the original behavior.

    Returns:
        (1, n) float array with one PAD feature value per frame.
    """
    if pad_weights is None:
        pad_weights = (W_p + W_a + W_d) / 3.0  # combine the three PAD dimensions once
    # Single vectorized matmul replaces the original per-row Python loop.
    avg_arry = np.asarray(exps_matrix @ pad_weights, dtype=float).reshape(1, -1)
    return avg_arry

# Print and return the mean, variance and standard deviation of a row vector.
def get_mean_var_std(arry):
    """Compute summary statistics of ``arry`` and echo them to stdout.

    Returns a (mean, variance, std) tuple.  NOTE: the variance uses the
    population formula (ddof=0) while the std uses the sample formula
    (ddof=1); this asymmetry is preserved from the original code.
    """
    arry_mean, arry_var, arry_std = np.mean(arry), np.var(arry), np.std(arry, ddof=1)
    banner = "************************************"
    print(banner)
    print("行向量均值：%.6f" % arry_mean)
    print("行向量方差：%.6f" % arry_var)
    print("行向量标准差:%.6f" % arry_std)
    print(banner)
    return arry_mean, arry_var, arry_std

# Print and return the Euclidean distance between two row vectors.
def get_distance(vec_x, vec_y):
    """Return the L2 norm of (vec_x - vec_y), echoing it to stdout."""
    diff = vec_x - vec_y
    distance = np.linalg.norm(diff)  # equivalent to sqrt(sum((x - y) ** 2))
    print("按冒泡法任意两个行向量的距离：",distance)
    print("************************************")
    return distance

# End of the paper's PAD-score computation code

'''Part1 End
********************************************************************************************************************************
'''

@smart_inference_mode()
def run(
        weights=ROOT / 'yolov5s.pt',  # model path or triton URL
        source=ROOT / 'data/images',  # file/dir/URL/glob/screen/0(webcam)
        data=ROOT / 'data/coco128.yaml',  # dataset.yaml path
        imgsz=(640, 640),  # inference size (height, width)
        conf_thres=0.25,  # confidence threshold
        iou_thres=0.45,  # NMS IOU threshold
        max_det=1000,  # maximum detections per image
        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        view_img=False,  # show results
        save_txt=False,  # save results to *.txt
        save_conf=False,  # save confidences in --save-txt labels
        save_crop=False,  # save cropped prediction boxes
        nosave=False,  # do not save images/videos
        classes=None,  # filter by class: --class 0, or --class 0 2 3
        agnostic_nms=False,  # class-agnostic NMS
        augment=False,  # augmented inference
        visualize=False,  # visualize features
        update=False,  # update all models
        project=ROOT / 'runs/detect',  # save results to project/name
        name='exp',  # save results to project/name
        exist_ok=False,  # existing project/name ok, do not increment
        line_thickness=3,  # bounding box thickness (pixels)
        hide_labels=False,  # hide labels
        hide_conf=False,  # hide confidences
        half=False,  # use FP16 half-precision inference
        dnn=False,  # use OpenCV DNN for ONNX inference
        vid_stride=1,  # video frame-rate stride
):
    """Run YOLOv5 detection plus a second-stage facial-expression classifier.

    For every detected box the crop is converted to a 48x48 grayscale image,
    classified into one of four expressions (Tired/Confused/Neutral/Happy),
    and the per-frame results (attendance ratio, expression counts/ratios and
    probabilities) are drawn onto the output image and appended to a
    timestamped CSV file under ``CSV/``.  After the loop the accumulated
    expression matrix is printed and summarized.
    """
    source = str(source)
    save_img = not nosave and not source.endswith('.txt')  # save inference images
    is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
    is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
    webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file)
    screenshot = source.lower().startswith('screen')
    if is_url and is_file:
        source = check_file(source)  # download

    # Directories
    save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
    (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir

    # Load model
    device = select_device(device)
    model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
    stride, names, pt = model.stride, model.names, model.pt# names holds the class names from the model labels
    imgsz = check_img_size(imgsz, s=stride)  # check image size

    '''Part2 Begin
    ********************************************************************************************************************************
    '''

    # Added: second-stage facial-expression classification network
    emotion_model_path = 'my_weights/big_XCEPTION.91-0.98.h5'
    emotion_classifier = load_model(emotion_model_path, compile=False)
    EXPRESSIONS = ["Tired","Confused","Neutral","Happy"]
    #EXPRESSIONS = ["T","C","N","H"]# initials only — full names crowd the rendered image
    # Matrix accumulating per-frame expression values; first two rows are header + type info
    output_info_title = ["帧数", "编号", "信度1", "信度2", "信度3", "信度4"]
    output_info_type = ["integer", "string", "float", "float", "float", "float"]
    ordinary_arry = np.array([output_info_title,output_info_type])

    '''Part2 End
    ********************************************************************************************************************************
    '''

    # Dataloader
    bs = 1  # batch_size
    if webcam:
        view_img = check_imshow(warn=True)
        dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
        bs = len(dataset)
    elif screenshot:
        dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt)
    else:
        dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
    vid_path, vid_writer = [None] * bs, [None] * bs
    # One slot per stream: with multi-stream (webcam list) input bs > 1, so each
    # stream gets its own output path and its own cv2.VideoWriter.

    # Run inference
    model.warmup(imgsz=(1 if pt or model.triton else bs, 3, *imgsz))  # warmup
    seen, windows, dt = 0, [], (Profile(), Profile(), Profile())

    # Timestamp used to give this run's probability CSV a unique name
    csv_timestamp = time.strftime("%Y%m%d%H%M%S",time.localtime(time.time()))

    for path, im, im0s, vid_cap, s in dataset:
        with dt[0]:
            im = torch.from_numpy(im).to(model.device)# image/frame -> torch tensor
            im = im.half() if model.fp16 else im.float()  # uint8 to fp16/32
            im /= 255  # 0 - 255 to 0.0 - 1.0 (normalize)
            if len(im.shape) == 3:
                im = im[None]  # expand for batch dim

        # Inference
        with dt[1]:
            visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False
            pred = model(im, augment=augment, visualize=visualize)
            # im is the preprocessed tensor fed to the network; im0s below is the raw image/frame.
            # visualize=visualize dumps feature maps; augment=augment enables augmented inference.

        # NMS (non-max suppression) removes duplicate boxes on the same target

        # NMS
        with dt[2]:
            pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)

        # Second-stage classifier (optional)
        # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)

        # Process predictions
        for i, det in enumerate(pred):  # per image
            # det is an (n, 6) tensor: columns are x1, y1 (top-left), x2, y2
            # (bottom-right), confidence, class index — sample dump below.
            '''
            # tensor([
            [2.70695e+02, 2.21441e+02, 2.87307e+02, 2.44074e+02, 8.90430e-01, 1.00000e+00],
            [1.00378e+02, 1.75017e+02, 1.21737e+02, 2.02016e+02, 8.67976e-01, 1.30000e+01],
            [1.51981e+02, 2.57111e+02, 1.71357e+02, 2.86305e+02, 8.53957e-01, 0.00000e+00],
            [3.90679e+02, 1.44954e+02, 4.01846e+02, 1.61478e+02, 7.78552e-01, 1.10000e+01],
            [4.71554e+02, 1.76641e+02, 4.83244e+02, 1.92451e+02, 7.67488e-01, 2.00000e+00],
            [1.70493e+02, 1.40454e+02, 1.84549e+02, 1.58634e+02, 7.62521e-01, 1.50000e+01],
            [2.00545e+02, 1.79748e+02, 2.15696e+02, 1.97838e+02, 6.73865e-01, 1.20000e+01],
            [5.06553e+02, 1.52032e+02, 5.22645e+02, 1.70745e+02, 6.68628e-01, 3.00000e+00],
            [3.21738e+02, 1.28788e+02, 3.32333e+02, 1.40552e+02, 6.46049e-01, 1.60000e+01],
            [3.73485e+02, 1.21496e+02, 3.82510e+02, 1.32394e+02, 5.03905e-01, 1.60000e+01],
            [4.69281e+02, 1.27338e+02, 4.81245e+02, 1.40449e+02, 4.11755e-01, 9.00000e+00],
            [5.07059e+02, 1.53500e+02, 5.21944e+02, 1.71232e+02, 2.85966e-01, 2.00000e+00],
            [4.71904e+02, 9.18354e+01, 4.80508e+02, 1.00761e+02, 2.72819e-01, 2.00000e+01],
            [5.88189e+02, 1.08103e+02, 5.99460e+02, 1.19065e+02, 2.69952e-01, 1.40000e+01]
            ], device='cuda:0')
            '''
            seen += 1# seen counts the frames/images processed so far
            if webcam:  # batch_size >= 1
                p, im0, frame = path[i], im0s[i].copy(), dataset.count
                s += f'{i}: '# per-stream prefix for the log line
            else:
                p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0)
                # p: current file path, e.g. C:\yolov5\test.mp4
                # im0: the raw (unscaled) frame
                # frame: 1-based index of the current frame within the video/file list
            # Folder/row name derived from the frame index (added for per-frame bookkeeping)
            frame_dir_name = frame# added to identify each frame in the CSV and crop folders

            p = Path(p)  # to Path (p.stem / p.name used below)
            save_path = str(save_dir / p.name)  # im.jpg
            txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}')  # im.txt
            s += '%gx%g ' % im.shape[2:]  # print string, e.g. '384x640 '
            gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh, e.g. tensor([1920, 1080, 1920, 1080])
            imc = im0.copy() if save_crop else im0  # for save_crop; im0.shape e.g. (1080, 1920, 3)
            annotator = Annotator(im0, line_width=line_thickness, example=str(names))# draws boxes/labels onto im0

            '''Part3 Begin
            ********************************************************************************************************************************
            '''
            # Part 3: feed each YOLO-detected region (roi) to the expression model and
            # append both detection and expression values to a CSV for later analysis.
            # NOTE(review): the 'CSV/' directory must already exist — open() in append
            # mode does not create parent directories.
            with open("CSV/" + f'{csv_timestamp}deteted_probability.csv', 'a', newline  = '') as probability_f:
                csv_writer = csv.writer(probability_f)
                if len(det):# number of targets detected in this image/frame
                    # Rescale boxes from img_size to im0 size
                    # (maps letterboxed e.g. 384x640 coordinates back to the original
                    # resolution, e.g. 1920x1080)
                    det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()

                    # Print results
                    # det columns: x1, y1, x2, y2, confidence, class index
                    for c in det[:, 5].unique():# iterate over the distinct class ids present
                        n = (det[:, 5] == c).sum()  # detections per class
                        s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string, e.g. "43 faces, "
                        my_count = f"{n}"# per-frame detection count, reused below for label coloring
                        # NOTE(review): my_count keeps only the LAST class's count when
                        # multiple classes are detected — confirm that is intended.

                    # Attendance: compare detected faces against the expected class size
                    stus_total_count = 49 # hard-coded expected number of students; adjust per class

                    if len(det) >= stus_total_count: # all students found -> clamp attendance to 100%
                        attendence_ratio = (stus_total_count / stus_total_count) * 100
                        attendence_ratio_label = f'{"All:"} {stus_total_count} {"Counted:"}{stus_total_count} {"AttendenceRatio:"}{attendence_ratio:.2f}'
                    else:
                        attendence_ratio = (len(det) / stus_total_count *100) # fewer faces than students -> ratio < 100%
                        attendence_ratio_label = f'{"All:"}{stus_total_count} {"Counted:"}{len(det)} {"AttendenceRatio:"}{attendence_ratio:.2f}'

                    # Draw the attendance label on the frame; [0,10,0,11] is the on-image
                    # position, and the color is derived from the ratio value.
                    annotator.box_label([0,10,0,11], attendence_ratio_label, color = colors(int(attendence_ratio), True))

                    expression0_count = 0 # running count of expression 0 (Tired)
                    expression1_count = 0 # running count of expression 1 (Confused)
                    expression2_count = 0 # running count of expression 2 (Neutral)
                    expression3_count = 0 # running count of expression 3 (Happy)

                    # Write results
                    for *xyxy, conf, cls in reversed(det):
                        # Custom expression-detection stage starts here
                        faces = xyxy # detected face-box coordinates
                        # top-left (fXleft, fYleft) and bottom-right (fXright, fYright) corners
                        fXleft, fYleft, fXright, fYright = int(faces[0].item()),int(faces[1].item()),int(faces[2].item()),int(faces[3].item())
                        gray = cv2.cvtColor(imc, cv2.COLOR_BGR2GRAY)# grayscale to cut classifier compute
                        roi =  gray[fYleft:fYright, fXleft:fXright]# crop the face region
                        roi = cv2.resize(roi, (48, 48))# resize to the expression model's input size (set at training time)
                        roi = roi.astype("float") / 255.0# normalize to [0, 1]
                        roi = tf.keras.image_utils.img_to_array(roi)# to array; NOTE(review): modern TF exposes this as tf.keras.utils.img_to_array — confirm against the installed version
                        face_input = np.expand_dims(roi,axis=0)# add the batch dimension the model expects
                        preds = emotion_classifier.predict(face_input)[0]# run the expression classifier

                        # Per-expression probabilities formatted as 4-decimal strings
                        each_probability = ["{:.4f}".format(preds[0]), "{:.4f}".format(preds[1]),"{:.4f}".format(preds[2]),"{:.4f}".format(preds[3])]
                        max_probability = max(each_probability)# most likely expression (string max works: equal-width numeric strings)
                        max_loc = each_probability.index(max_probability)# index (0-3) of the most likely expression
                        stu_name = names[int(cls)]# map the class id to the student id/name

                        # CSV row layout: frame index, student id, detection confidence,
                        # best expression + its probability, then each expression name
                        # paired with its probability
                        output_info = [frame_dir_name, stu_name, "{:.4f}".format(conf), EXPRESSIONS[max_loc],  max_probability, EXPRESSIONS[0], each_probability[0], EXPRESSIONS[1], each_probability[1], EXPRESSIONS[2], each_probability[2], EXPRESSIONS[3], each_probability[3]]
                        row_info = np.array([output_info[0],output_info[1],output_info[6],output_info[8],output_info[10],output_info[12]])
                        ordinary_arry = np.append(ordinary_arry,[row_info],axis = 0)

                        csv_writer.writerow(output_info[:])# append the row to the CSV file
                        # print(stu_name, "最大可能：",EXPRESSIONS[max_loc] + ":" + max_probability + "每种概率：",EXPRESSIONS[0] + ":" + each_probability[0],EXPRESSIONS[1] + ":" +each_probability[1],EXPRESSIONS[2] + ":" +each_probability[2],EXPRESSIONS[3] + ":" +each_probability[3])
                        # NOTE(review): the bare f-string below is a no-op expression —
                        # likely a leftover from building the label; safe to remove.
                        f'{names[int(cls)]} {conf:.2f}{EXPRESSIONS[max_loc]}{max_probability}'

                        # Tally the per-expression head counts and ratios
                        if (max_loc == 0):
                            expression0_count = (expression0_count + 1)
                        elif (max_loc == 1):
                            expression1_count = (expression1_count + 1)
                        elif (max_loc == 2):
                            expression2_count = (expression2_count + 1)
                        else:
                            expression3_count = (expression3_count + 1)
                        ratio0 = "%.2f%%" %(expression0_count / stus_total_count *100)
                        ratio1 = "%.2f%%" %(expression1_count / stus_total_count *100)
                        ratio2 = "%.2f%%" %(expression2_count / stus_total_count *100)
                        ratio3 = "%.2f%%" %(expression3_count / stus_total_count *100)

                        # The stock YOLOv5 save_txt / save_img / save_crop branches are kept
                        # below (commented) for reference; customized versions follow.
                        # if save_txt:  # Write to file
                        #     xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
                        #     line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
                        #     with open(f'{txt_path}.txt', 'a') as f:
                        #         f.write(('%g ' * len(line)).rstrip() % line + '\n')
                        # if save_img or save_crop or view_img:  # Add bbox to image
                        #     c = int(cls)  # integer class
                        #     label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')
                        #     annotator.box_label(xyxy, label, color=colors(c, True))
                        # if save_crop:
                        #     save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True)

                        if save_txt:# Write to file
                            xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh (YOLO label format)
                            # Changed line: store class, confidence, best-expression index and
                            # probability, plus all four expression probabilities.
                            # NOTE(review): xywh is computed but no longer written — the custom
                            # `line` tuples drop the box coordinates; confirm that is intended.
                            line = (cls, conf, max_loc, float(max_probability), float(each_probability[0]),float(each_probability[1]),float(each_probability[2]),float(each_probability[3])) if save_conf else (cls, max_loc, float(max_probability), float(each_probability[0]),float(each_probability[1]),float(each_probability[2]),float(each_probability[3]))
                            with open(txt_path + '.txt', 'a') as f:
                                f.write(('%g ' * len(line)).rstrip() % line + '\n')

                        if save_img or save_crop or view_img:  # Add bbox to image
                            c = int(cls)  # integer class
                            label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}{EXPRESSIONS[max_loc]}{max_probability}')# label also carries the best expression + probability
                            # print("label:",label)#s2 0.89
                            annotator.box_label(xyxy, label, color=colors(c, True))

                        if save_crop:
                            # Option A (commented): one folder per frame — everyone's expression at the same moment.
                            # Option B (active): one folder per person — each student's expression over the lesson.
                            #save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] /  str(frame_dir_name) / f'my_count.jpg', BGR=True)# per-frame folders
                            now = time.strftime("%Y%m%d%H%M%S",time.localtime(time.time()))
                            #save_one_box(xyxy, imc, file=save_dir / 'crops' /  f'{now}.jpg', BGR=True)# store detected faces by timestamp only
                            save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{now}.jpg', BGR=True)# store crops per class, named by timestamp
                            # imc is the copy of im0 made above, so crops are free of drawn boxes

                    # Per-expression head-count/ratio label for this frame
                    each_expression_label = f"{'Tired:'}{expression0_count}{'--'}{ratio0} {'Confused:'}{expression1_count}{'--'}{ratio1} {'Neutral:'}{expression2_count}{'--'}{ratio2} {'Happy:'}{expression3_count}{'--'}{ratio3}"
                    # Draw the per-expression counts/ratios onto the frame at [0,80,0,81]
                    annotator.box_label([0,80,0,81], each_expression_label, color=colors(int(my_count), True))
                else:
                    # No detections: write an all-zero row for this frame
                    csv_writer.writerow([frame_dir_name,0.0000,0.0000,0.0000,0.0000,0.0000])

            # Stream results
            im0 = annotator.result()
            if view_img:
                if platform.system() == 'Linux' and p not in windows:
                    windows.append(p)
                    cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)  # allow window resize (Linux)
                    cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0])
                cv2.imshow(str(p), im0)
                cv2.waitKey(1)  # 1 millisecond

            # Save results (image with detections)
            if save_img:
                if dataset.mode == 'image':
                    cv2.imwrite(save_path, im0)
                else:  # 'video' or 'stream'
                    if vid_path[i] != save_path:  # new video
                        vid_path[i] = save_path
                        if isinstance(vid_writer[i], cv2.VideoWriter):
                            vid_writer[i].release()  # release previous video writer
                        if vid_cap:  # video
                            fps = vid_cap.get(cv2.CAP_PROP_FPS)
                            w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                            h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                        else:  # stream
                            fps, w, h = 30, im0.shape[1], im0.shape[0]
                        save_path = str(Path(save_path).with_suffix('.mp4'))  # force *.mp4 suffix on results videos
                        vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
                    vid_writer[i].write(im0)

        # Print time (inference-only)
        LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms")

    # Dump the accumulated expression matrix
    print("表情矩阵\n",ordinary_arry)

    # Slice the expression matrix to build per-student matrices; shape needed first
    ordinary_arry_rows = ordinary_arry.shape[0]# row count
    ordinary_arry_cols = ordinary_arry.shape[1]# column count
    print("表情矩阵行数：",ordinary_arry_rows,"表情矩阵列数：",ordinary_arry_cols)

    # Count how many distinct students were observed
    # NOTE(review): ordinary_arry[:1, 1] selects only the header row's second cell,
    # so the unique count below is always 1 — `ordinary_arry[1:, 1]` (skip header
    # rows) was probably intended.  Also, the bare dict(zip(...)) line is a no-op.
    unique_arry = ordinary_arry[:1, 1]
    unique_stu,counts = np.unique(unique_arry,return_counts = True)
    dict(zip(unique_stu,counts))
    print(len(dict(zip(unique_stu,counts))))
    print(dict(zip(unique_stu,counts)))
    # get_avg(ordinary_arry)
    # get_mean_var_std(get_avg(ordinary_arry))

    # Print results
    t = tuple(x.t / seen * 1E3 for x in dt)  # speeds per image
    LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t)
    if save_txt or save_img:
        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
        LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
    if update:
        strip_optimizer(weights[0])  # update model (to fix SourceChangeWarning)


def parse_opt():
    """Build the CLI argument parser, parse sys.argv and return the options."""
    argp = argparse.ArgumentParser()
    argp.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path or triton URL')
    argp.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob/screen/0(webcam)')
    argp.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path')
    argp.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w')
    argp.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold')
    argp.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold')
    argp.add_argument('--max-det', type=int, default=1000, help='maximum detections per image')
    argp.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    argp.add_argument('--view-img', action='store_true', help='show results')
    argp.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    argp.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
    argp.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes')
    argp.add_argument('--nosave', action='store_true', help='do not save images/videos')
    argp.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3')
    argp.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
    argp.add_argument('--augment', action='store_true', help='augmented inference')
    argp.add_argument('--visualize', action='store_true', help='visualize features')
    argp.add_argument('--update', action='store_true', help='update all models')
    argp.add_argument('--project', default=ROOT / 'runs/detect', help='save results to project/name')
    argp.add_argument('--name', default='exp', help='save results to project/name')
    argp.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    argp.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)')
    argp.add_argument('--hide-labels', default=False, action='store_true', help='hide labels')
    argp.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences')
    argp.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
    argp.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
    argp.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride')
    opt = argp.parse_args()
    # A single --imgsz value means a square size: duplicate it into (h, w)
    if len(opt.imgsz) == 1:
        opt.imgsz *= 2
    print_args(vars(opt))
    return opt


def main(opt):
    """Verify runtime requirements, then forward the parsed options to run()."""
    check_requirements(exclude=('tensorboard', 'thop'))
    options = vars(opt)
    run(**options)


if __name__ == "__main__":
    # Script entry point: parse CLI options and launch inference.
    main(parse_opt())
