# -*- coding: utf-8 -*-
"""
Created on Tue Jun 15 19:05:16 2021
pip install torch==1.8.1+cu102 torchvision==0.9.1+cu102 torchaudio===0.8.1 -f https://download.pytorch.org/whl/torch_stable.html
YOLOv5 requires PyTorch version 1.6 or later.

Object classes recognized by the stock YOLOv5 model (80 COCO classes):
'person','bicycle','car','motorcycle', 'airplane',
'bus','train', 'truck','boat', 'traffic light', 
'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 
'cat', 'dog', 'horse', 'sheep', 'cow', 
'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 
'umbrella', 'handbag', 'tie', 'suitcase','frisbee', 
'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 
'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 
'wine glass', 'cup', 'fork', 'knife', 'spoon', 
'bowl', 'banana','apple', 'sandwich', 'orange', 
'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 
'cake', 'chair', 'couch', 'potted plant', 'bed', 
'dining table', 'toilet', 'tv', 'laptop', 'mouse', 
'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 
'toaster', 'sink', 'refrigerator', 'book', 'clock', 
'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'

Additional dependencies: pandas, seaborn
@author: April971
"""
#new import for modbusMaster
import logging
import modbus_tk
import modbus_tk.defines as cst
import modbus_tk.modbus_tcp as modbus_tcp


import sys#导入系统
from PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, QPushButton,QGroupBox, QTextEdit,QLabel,QLineEdit
from PyQt5.QtGui import QPalette, QBrush, QPixmap ,QIcon
from PyQt5.QtCore import QRect,Qt,QDateTime,QTimer
from PIL import Image

import argparse
import time
from pathlib import Path

import cv2
import torch
import torch.backends.cudnn as cudnn
from numpy import random

from models.experimental import attempt_load
from utils.datasets import LoadStreams, LoadImages
from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, \
    scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path
from utils.plots import plot_one_box
from utils.torch_utils import select_device, load_classifier, time_synchronized

class FirstUi(QMainWindow):
    """Main application window for the AI-Station recognition system.

    Lays out a fixed 1300x680 window with control buttons, a log pane,
    an RTSP-URL input line and a video display area, and runs a YOLOv5
    detection loop over the configured source when button 1 is clicked.
    """

    def __init__(self):
        super(FirstUi, self).__init__()
        self.init_ui()

    def init_ui(self):
        """Create and position all widgets inside a fixed-size window."""
        self.resize(1300, 680)
        self.setFixedSize(1300, 680)  # window is not resizable
        self.setGeometry(10, 50, 1300, 680)
        self.setWindowTitle('AI-Station   天津港边缘计算识别系统')

        # Group boxes partitioning the window into functional regions.
        self.groupBox1 = QGroupBox("", self)
        self.groupBox1.setGeometry(QRect(10, 10, 1280, 660))
        self.groupBox2 = QGroupBox("控制按钮", self)
        self.groupBox2.setGeometry(QRect(20, 20, 400, 200))

        self.groupBox3 = QGroupBox("提示信息", self)
        self.groupBox3.setGeometry(QRect(20, 230, 400, 450))

        self.groupBox4 = QGroupBox("视频显示", self)
        self.groupBox4.setGeometry(QRect(440, 20, 840, 640))

        self.groupBox5 = QGroupBox("Rstp URL输入:", self)
        self.groupBox5.setGeometry(QRect(20, 580, 400, 80))

        # Label intended as the video display surface.
        self.label0 = QLabel(self)
        self.label0.setGeometry(QRect(460, 40, 800, 600))

        self.button1 = QPushButton("启动AI人员识别", self)
        self.button1.setGeometry(QRect(50, 50, 155, 55))
        self.button2 = QPushButton("启动AI集卡测速", self)
        self.button2.setGeometry(QRect(235, 50, 155, 55))

        # Extra test button; currently not connected to any handler.
        self.button3 = QPushButton("按钮", self)
        self.button3.setGeometry(QRect(50, 125, 340, 55))

        self.button1.clicked.connect(self.but1Click)
        # self.button2.clicked.connect(self.but2Click)  # handler not implemented yet

        self.textEdit1 = QTextEdit(self)
        self.textEdit1.setGeometry(QRect(30, 255, 380, 310))
        self.textEdit1.setObjectName("textEdit1")
        # Cap the log widget at 10000 text blocks so it cannot grow unbounded.
        self.textEdit1.document().setMaximumBlockCount(10000)

        self.LineEdit1 = QLineEdit(self)
        self.LineEdit1.setGeometry(QRect(30, 600, 380, 50))
        self.LineEdit1.setObjectName("LineEdit1")

    def but1Click(self):
        """Run YOLOv5 inference over the configured source.

        Parses detection options (weights, source, thresholds, ...),
        loads the model, iterates frames from a webcam/stream or from
        image/video files, applies NMS, draws/saves/prints detections.

        Blocks the Qt event loop while running; NOTE(review): the
        detection loop should probably live in a worker thread — confirm
        before shipping.
        """
        parser = argparse.ArgumentParser()
        parser.add_argument('--weights', nargs='+', type=str, default='Yxweights/yolov5s.pt', help='model.pt path(s)')
        parser.add_argument('--source', type=str, default='0', help='source')  # file/folder, 0 for webcam
        parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
        parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
        parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
        parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
        parser.add_argument('--view-img', action='store_true', help='display results')
        parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
        parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
        parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
        parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
        parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
        parser.add_argument('--augment', action='store_true', help='augmented inference')
        parser.add_argument('--update', action='store_true', help='update all models')
        parser.add_argument('--project', default='runs/detect', help='save results to project/name')
        parser.add_argument('--name', default='exp', help='save results to project/name')
        parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
        opt = parser.parse_args()

        with torch.no_grad():
            source, weights, view_img, save_txt, imgsz = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size
            save_img = not opt.nosave and not source.endswith('.txt')  # save inference images
            # A numeric source, a .txt list, or a streaming URL is treated as a webcam/stream.
            webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(
                ('rtsp://', 'rtmp://', 'http://', 'https://'))

            # Directories
            save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))  # increment run
            (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir

            # Initialize
            set_logging()
            device = select_device(opt.device)
            half = device.type != 'cpu'  # half precision only supported on CUDA

            # Load model
            model = attempt_load(weights, map_location=device)  # load FP32 model
            stride = int(model.stride.max())  # model stride
            imgsz = check_img_size(imgsz, s=stride)  # check img_size
            if half:
                model.half()  # to FP16

            # Set Dataloader
            vid_path, vid_writer = None, None
            if webcam:
                view_img = check_imshow()
                cudnn.benchmark = True  # set True to speed up constant image size inference
                dataset = LoadStreams(source, img_size=imgsz, stride=stride)
            else:
                # BUGFIX: this branch was commented out, which left `dataset`
                # undefined (NameError) for file/folder sources.
                dataset = LoadImages(source, img_size=imgsz, stride=stride)

            # Get names and colors
            names = model.module.names if hasattr(model, 'module') else model.names
            colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]

            # Run inference (warm-up pass on GPU so the first real frame is not slow)
            if device.type != 'cpu':
                model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters())))  # run once
            t0 = time.time()
            for path, img, im0s, vid_cap in dataset:
                img = torch.from_numpy(img).to(device)
                img = img.half() if half else img.float()  # uint8 to fp16/32
                img /= 255.0  # 0 - 255 to 0.0 - 1.0
                if img.ndimension() == 3:
                    img = img.unsqueeze(0)

                # Inference
                t1 = time_synchronized()
                pred = model(img, augment=opt.augment)[0]

                # Apply NMS
                pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)
                t2 = time_synchronized()

                # Process detections
                for i, det in enumerate(pred):  # detections per image
                    if webcam:  # batch_size >= 1
                        p, s, im0, frame = path[i], '%g: ' % i, im0s[i].copy(), dataset.count
                    else:
                        p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0)

                    p = Path(p)  # to Path
                    save_path = str(save_dir / p.name)  # img.jpg
                    txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}')  # img.txt
                    s += '%gx%g ' % img.shape[2:]  # print string
                    gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh

                    if len(det):
                        # Rescale boxes from img_size to im0 size
                        det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()

                        # Print results
                        for c in det[:, -1].unique():
                            n = (det[:, -1] == c).sum()  # detections per class
                            s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string

                        # Write results
                        for *xyxy, conf, cls in reversed(det):
                            if save_txt:  # Write to file
                                xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
                                line = (cls, *xywh, conf) if opt.save_conf else (cls, *xywh)  # label format
                                with open(txt_path + '.txt', 'a') as f:
                                    f.write(('%g ' * len(line)).rstrip() % line + '\n')

                            if save_img or view_img:  # Add bbox to image
                                label = f'{names[int(cls)]} {conf:.2f}'
                                plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3)

                    # Print time (inference + NMS)
                    print(f'{s}Done. ({t2 - t1:.3f}s)')

                    # Stream results
                    if view_img:
                        cv2.imshow(str(p), im0)
                        cv2.waitKey(1)  # 1 millisecond

                    # Save results (image with detections)
                    if save_img:
                        if dataset.mode == 'image':
                            cv2.imwrite(save_path, im0)
                        else:  # 'video' or 'stream'
                            if vid_path != save_path:  # new video
                                vid_path = save_path
                            if isinstance(vid_writer, cv2.VideoWriter):
                                vid_writer.release()  # release previous video writer
                            if vid_cap:  # video file: reuse its fps and frame size
                                fps = vid_cap.get(cv2.CAP_PROP_FPS)
                                w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                                h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                            else:  # stream: assume 30 fps, size of the current frame
                                fps, w, h = 30, im0.shape[1], im0.shape[0]
                                save_path += '.mp4'
                            vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
                        vid_writer.write(im0)

        if save_txt or save_img:
            s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
            print(f"Results saved to {save_dir}{s}")

        print(f'Done. ({time.time() - t0:.3f}s)')


def main():
    """Launch the Qt application and show the main window."""
    application = QApplication(sys.argv)
    window = FirstUi()
    window.show()
    # exec_() runs the event loop until the main window is closed;
    # without it the window would appear and immediately vanish.
    sys.exit(application.exec_())


if __name__ == '__main__':
    # Only start the GUI when run as a script, not when imported.
    main()