# import argparse
# import os
# import time
# from pathlib import Path
# import math
# import cv2
# import torch
# import torch.backends.cudnn as cudnn
# from numpy import random
#
# from models.experimental import attempt_load
# from utils.datasets import LoadStreams, LoadImages
# from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, \
#     scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path
# from utils.plots import plot_one_box
# from utils.torch_utils import select_device, load_classifier, time_synchronized
# parser = argparse.ArgumentParser()
# opt = parser.parse_args()
# ANGLE_WEIGHTS = 30
# DISTANCE_WEIGHTS = 50
# class Solution:
#     def the_big_turns(self, big, center_point):
#         ret = []
#         table = [[0.0] * len(big) for _ in range(len(big))]
#         st = set()
#
#         # 检查是否有点到中心点的距离小于0.05的情况
#         found_close_point = False
#         close_point_index = -1
#         for i in range(len(big)):
#             distance_points = self.get_distance(big[i], center_point)
#             if distance_points <= 0.05:
#                 found_close_point = True
#                 close_point_index = i
#                 break
#
#         if found_close_point:
#             # 使用找到的点移除中心点
#             del big[close_point_index]
#
#         for i in range(len(big)):
#             for j in range(len(big)):
#                 table[i][j] = self.calculate_weights(big[i], big[j], center_point)
#                 print(table[i][j],end=' ')
#             print()
#
#         index_first = 0
#         index_second = 0
#         st.add(0)
#         while len(st) < len(big):
#             max_weights = 0
#             for i in range(len(big)):
#                 if table[index_first][i] > max_weights and i not in st:
#                     index_second = i
#                     max_weights = table[index_first][i]
#             st.add(index_second)
#             index_first = index_second
#
#         for x in st:
#             ret.append(big[x])
#
#         # 最后输出被取代的中心点
#         if found_close_point:
#             ret.append(center_point)
#
#         return ret
#
#     def calculate_weights(self, p1, p2, center_point):
#         angle = self.get_angle(p1, p2, center_point)
#         angle_weights = (angle / 90)
#         distance = self.get_distance(p2, center_point)
#         distance_weights = distance
#         return angle_weights * ANGLE_WEIGHTS + distance_weights * DISTANCE_WEIGHTS
#
#     def get_distance(self, p1, p2):
#         distance = math.sqrt((p1[0] - p2[0])*(p1[0] - p2[0]) + (p1[1] - p2[1])*(p1[1] - p2[1]))
#         #if distance > 0.01:
#         return distance
#
#     def get_angle(self, p1, p2, center_p):
#         x1 = p1[0]
#         y1 = p1[1]
#
#         x2, y2 = center_p
#         x3, y3 = p2
#
#         vectorAB_x = x2 - x1
#         vectorAB_y = y2 - y1
#         vectorBC_x = x3 - x2
#         vectorBC_y = y3 - y2
#
#         dotProduct = vectorAB_x * vectorBC_x + vectorAB_y * vectorBC_y
#         magnitudeAB = math.sqrt(vectorAB_x * vectorAB_x + vectorAB_y * vectorAB_y)
#         magnitudeBC = math.sqrt(vectorBC_x * vectorBC_x + vectorBC_y * vectorBC_y)
#         angleRad = math.acos(max(min(dotProduct / (magnitudeAB * magnitudeBC), 1), -1))
#         angleDeg = angleRad * 180.0 / math.pi
#         return angleDeg
#
# def detect(save_img=False):
#     global content
#     print(os.getcwd())
#     print(os.environ)
#     source, weights, view_img, save_txt, imgsz = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size
#     save_img = not opt.nosave and not source.endswith('.txt')  # save inference images
#     webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(
#         ('rtsp://', 'rtmp://', 'http://', 'https://'))
#
#     # Directories
#     save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))  # increment run
#     (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir
#
#     # Initialize
#     set_logging()
#     device = select_device(opt.device)
#     half = device.type != 'cpu'  # half precision only supported on CUDA
#
#     # Load model
#     model = attempt_load(weights, map_location=device)  # load FP32 model
#     stride = int(model.stride.max())  # model stride
#     imgsz = check_img_size(imgsz, s=stride)  # check img_size
#     if half:
#         model.half()  # to FP16
#
#     # Second-stage classifier
#     classify = False
#     if classify:
#         modelc = load_classifier(name='resnet101', n=2)  # initialize
#         modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model']).to(device).eval()
#
#     # Set Dataloader
#     vid_path, vid_writer = None, None
#     if webcam:
#         view_img = check_imshow()
#         cudnn.benchmark = True  # set True to speed up constant image size inference
#         dataset = LoadStreams(source, img_size=imgsz, stride=stride)
#     else:
#         dataset = LoadImages(source, img_size=imgsz, stride=stride)
#
#     # Get names and colors
#     names = model.module.names if hasattr(model, 'module') else model.names
#     colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]
#
#     # Run inference
#     if device.type != 'cpu':
#         model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters())))  # run once
#     t0 = time.time()
#     for path, img, im0s, vid_cap in dataset:
#         img = torch.from_numpy(img).to(device)
#         img = img.half() if half else img.float()  # uint8 to fp16/32
#         img /= 255.0  # 0 - 255 to 0.0 - 1.0
#         if img.ndimension() == 3:
#             img = img.unsqueeze(0)
#
#         # Inference
#         t1 = time_synchronized()
#         pred = model(img, augment=opt.augment)[0]
#
#         # Apply NMS
#         pred = non_max_suppression(pred, 0.5, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms) #opt.conf_thres
#         t2 = time_synchronized()
#
#         # Apply Classifier
#         if classify:
#             pred = apply_classifier(pred, modelc, img, im0s)
#
#         # Process detections
#         for i, det in enumerate(pred):  # detections per image
#             if webcam:  # batch_size >= 1
#                 p, s, im0, frame = path[i], '%g: ' % i, im0s[i].copy(), dataset.count
#             else:
#                 p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0)
#
#             p = Path(p)  # to Path
#             save_path = str(save_dir / p.name)  # img.jpg
#             txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}')  # img.txt
#             s += '%gx%g ' % img.shape[2:]  # print string
#             gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
#             if len(det):
#                 # Rescale boxes from img_size to im0 size
#                 det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
#
#                 # Print results
#                 for c in det[:, -1].unique():
#                     n = (det[:, -1] == c).sum()  # detections per class
#                     s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string
#
#                 # Write results
#                 for *xyxy, conf, cls in reversed(det):
#                     if save_txt:  # Write to file
#                         xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
#                         aaaaaaa=int(cls)
#                         line = (aaaaaaa, *xywh, conf) if opt.save_conf else (aaaaaaa, *xywh)  # label format
#                         # print(int(cls))
#                         # print(int(cls))
#                         #
#                         # print(type(line))
#                         # print(line)
#                         content.append(line)
#                         with open(txt_path + '.txt', 'a') as f:
#                             f.write(('%g ' * len(line)).rstrip() % line + '\n')
#
#                     if save_img or view_img:  # Add bbox to image
#                         label = f'{names[int(cls)]} {conf:.2f}'
#                         plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3)
#
#             # Print time (inference + NMS)
#             print(f'{s}Done. ({t2 - t1:.3f}s)')
#
#             # Stream results
#             if view_img:
#                 cv2.imshow(str(p), im0)
#                 cv2.waitKey(1)  # 1 millisecond
#
#             # Save results (image with detections)
#             if save_img:
#                 if dataset.mode == 'image':
#                     cv2.imwrite(save_path, im0)
#                 else:  # 'video' or 'stream'
#                     if vid_path != save_path:  # new video
#                         vid_path = save_path
#                         if isinstance(vid_writer, cv2.VideoWriter):
#                             vid_writer.release()  # release previous video writer
#                         if vid_cap:  # video
#                             fps = vid_cap.get(cv2.CAP_PROP_FPS)
#                             w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
#                             h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
#                         else:  # stream
#                             fps, w, h = 30, im0.shape[1], im0.shape[0]
#                             save_path += '.mp4'
#                         vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
#                     vid_writer.write(im0)
#
#     if save_txt or save_img:
#         s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
#         print(f"Results saved to {save_dir}{s}")
#
#     print(f'Done. ({time.time() - t0:.3f}s)')
#
#
#
# if __name__ == '__main__':
#     content = []
#     parser = argparse.ArgumentParser()
#     parser.add_argument('--weights', nargs='+', type=str, default=r'runs\train\exp12\weights\best.pt', help='model.pt path(s)')
#     parser.add_argument('--source', type=str, default=r'data/images/Image_20230816171920757.bmp', help='source')  # file/folder, 0 for webcam
#     parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
#     parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
#     parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
#     parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
#     parser.add_argument('--view-img', action='store_true', help='display results')
#     parser.add_argument('--save-txt', default=True,action='store_true', help='save results to *.txt')
#     parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
#     parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
#     parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
#     parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
#     parser.add_argument('--augment', action='store_true', help='augmented inference')
#     parser.add_argument('--update', action='store_true', help='update all models')
#     parser.add_argument('--project', default='runs/detect', help='save results to project/name')
#     parser.add_argument('--name', default='exp', help='save results to project/name')
#     parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
#     opt = parser.parse_args()
#     #     print(opt)
#     check_requirements(exclude=('pycocotools', 'thop'))
#     #
#     with torch.no_grad():
#         if opt.update:  # update all models (to fix SourceChangeWarning)
#             for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']:
#
#                 detect()
#                 strip_optimizer(opt.weights)
#         else:
#             detect()
        #print(type(content))






    # for i in content:
    #     # for j in i:
    #         print(i)
    #         print(type(i))
# Per-size screw tallies; intended to be filled in by detection post-processing.
big_count, mid_count, small_count = 0, 0, 0
    # center_point = []
    #
    # big_list = []
    # mid_list = []
    # small_list = []
    #
    # for i in content:
    #     # for j in i:
    #     if i[0] == 0:
    #         center_point.append(i[1])
    #         center_point.append(i[2])
    #     if i[0] == 1:
    #         big_count = big_count + 1
    #         tmp = []
    #         tmp.append(i[1])
    #         tmp.append(i[2])
    #         big_list.append(tmp)
    #     if i[0] == 2:
    #         mid_count = mid_count + 1
    #         tmp = []
    #         tmp.append(i[1])
    #         tmp.append(i[2])
    #         mid_list.append(tmp)
    #     if i[0] == 3:
    #         small_count = small_count + 1
    #         tmp = []
    #         tmp.append(i[1])
    #         tmp.append(i[2])
    #         small_list.append(tmp)
    # print("dadian:",big_count)
    #
    # big_turns = []
    # big_solution = Solution()
    # big_turns = big_solution.the_big_turns(big_list, center_point)
    #
    # #print(big_turns)
    # # 定义要输出的文件路径
# Destination file where the per-size screw counts are published for
# downstream tools (e.g. the robot controller) to read.
file_path = 'E:/yolo/yolov5-5.0/0814_robot/num.txt'

# Build the summary text from the module-level counters defined above.
number_of_screws = 'BIG:{}\nMID:{}\nSMALL:{}'.format(big_count, mid_count, small_count)
print(number_of_screws)

# Persist the summary (overwrites any previous run's counts).
with open(file_path, 'w') as file:
    file.write(number_of_screws)

    # file_path1 = 'order.txt'
    # order_of_screws=big_turns
    # with open(file_path, 'w') as file:
    #     file.write(order_of_screws)



# -*- coding: utf-8 -*-
import sys
import _tkinter
import tkinter.messagebox
import argparse
from pathlib import Path
import math
import cv2
import torch
import torch.backends.cudnn as cudnn
from numpy import random

from models.experimental import attempt_load
from utils.datasets import LoadStreams, LoadImages
from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, \
    scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path
from utils.plots import plot_one_box
from utils.torch_utils import select_device, load_classifier, time_synchronized
import tkinter as tk
import sys, os
# current_file_path = os.path.abspath(__file__)
# current_directory = os.path.dirname(current_file_path)
# parent_directory = os.path.dirname(current_directory)
from tkinter import ttk
import ast
import matplotlib.pyplot as plt
from PIL import Image
# Raw strings for Windows paths: the original literals relied on backslash
# escapes; notably '...\site-packages\requests' contained "\r", which Python
# interprets as a carriage return and silently corrupts the path.
sys.path.append(r'E:\yolo\yolov5-5.0')
from MvCameraControl_class import *
from CamOperation_class import *
import config
import socket
from MvImport.CameraParams_header import MV_CC_DEVICE_INFO_LIST, MV_CC_DEVICE_INFO
sys.path.append(r'E:\Anaconda3\envs\yolov5\lib\site-packages\requests')
# TCP socket used to talk to the UR robot controller (connected on demand).
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Accumulates detection label tuples produced by detect(); consumed later.
content = []
# 获取选取设备信息的索引，通过[]之间的字符去解析
def TxtWrapBy(start_str, end, all):
    """Return the trimmed text found between start_str and end inside all.

    Returns None when either delimiter is missing (same as the original's
    implicit fall-through).
    """
    begin = all.find(start_str)
    if begin < 0:
        return None
    begin += len(start_str)
    stop = all.find(end, begin)
    if stop < 0:
        return None
    return all[begin:stop].strip()

def draw_and_save_points_on_image(image_path, txt_file, save_path):
    from PIL import Image, ImageTk
    """Draw numbered coordinate points on an image, save it, and preview it.

    Parameters:
    - image_path: path of the image to draw on
    - txt_file: path of a text file whose content is a Python literal list of
      normalized (x, y) coordinate pairs, e.g. "[(0.1, 0.2), ...]"
    - save_path: path where the annotated image is written

    NOTE(review): coordinates are scaled by 1914x1077 -- presumably the native
    resolution of the camera frame; confirm against the capture settings.
    """
    image = Image.open(image_path)

    # Parse the coordinate list from the text file.
    coordinates = []
    with open(txt_file, 'r') as file:
        data = file.read()
        try:
            coordinates = ast.literal_eval(data)
        except ValueError:
            print("Invalid coordinate format")

    # De-normalize to pixel coordinates.
    x = [coord[0] * 1914 for coord in coordinates]
    y = [coord[1] * 1077 for coord in coordinates]

    fig, ax = plt.subplots(figsize=(17.2, 9.6))  # 1720x960 px at the default dpi
    ax.imshow(image)
    ax.scatter(x, y)

    # Annotate each point with its visiting order and pixel position.
    for i, coord in enumerate(coordinates):
        ax.annotate(f"#{i + 1}:({int(coord[0] * 1914)}, {int(coord[1] * 1077)})", (x[i], y[i]),
                    textcoords="offset points", xytext=(0, -38), ha='center', color='red', fontsize=10)

    # Save to the caller-supplied path (the original ignored save_path and used
    # a hard-coded file) and close the figure so repeated calls don't leak.
    plt.savefig(save_path, dpi=600)
    plt.close(fig)

    # Preview the annotated image in the Tk label. resize() forces the pixel
    # data to load, so the file handle can be closed immediately (the original
    # leaked it). LANCZOS replaces ANTIALIAS, which Pillow 10 removed.
    width = 350
    height = 350
    with open(save_path, 'rb') as file_obj:
        preview = Image.open(file_obj)
        preview = preview.resize((width, height), Image.LANCZOS)
    photo = ImageTk.PhotoImage(preview)
    text_frame_text.config(image=photo)
    text_frame_text.image = photo  # keep a reference so Tk doesn't GC it
    #plt.show()
# 将返回的错误码转换为十六进制显示
def ToHexStr(num):
    """Convert an error code to a lowercase hex string without a '0x' prefix.

    Negative codes are interpreted as unsigned 32-bit values (num + 2**32),
    matching how the camera SDK reports its error codes.
    """
    if num < 0:
        num += 2 ** 32
    # format() replaces the original hand-rolled digit-by-digit loop; the
    # output is identical (lowercase, no prefix, '0' for zero).
    return format(num, 'x')


# Added: JPG image preview widgets in the main window.
window = tk.Tk()  # root Tk window for the whole GUI
label_frame_rate = tk.Label(window, text='Picture', width=15, height=1)
label_frame_rate.place(x=265, y=20)
# Label reused as the image canvas; callbacks set .config(image=...) and keep
# a reference on .image so Tk does not garbage-collect the PhotoImage.
text_frame_text = tk.Label(window, text='Picture',width=500, height=500)
text_frame_text.place(x=270, y=50)

if __name__ == "__main__":
    # Module-level camera state. NOTE(review): the `global` statements are
    # redundant here -- this code already runs at module scope inside the
    # __main__ guard -- but they are kept byte-identical.
    global deviceList
    deviceList = MV_CC_DEVICE_INFO_LIST()  # SDK device-enumeration buffer
    global tlayerType
    tlayerType = MV_GIGE_DEVICE | MV_USB_DEVICE  # search both GigE and USB transports
    global cam
    cam = MvCamera()  # SDK camera handle
    global nSelCamIndex
    nSelCamIndex = 0  # index of the camera chosen in the combobox
    global obj_cam_operation
    obj_cam_operation = 0  # becomes a CameraOperation instance once a device is opened
    global b_is_run
    b_is_run = False  # True while a camera is open/running


    # 绑定下拉列表至设备信息索引
    def xFunc(event):
        """Combobox callback: remember the index of the camera the user picked."""
        global nSelCamIndex
        # Entries look like "Gige[0]:..." or "USB[1]..."; extract the index.
        selected = device_list.get()
        nSelCamIndex = TxtWrapBy("[", "]", selected)


    # ch:枚举相机 | en:enum devices
    def enum_devices():
        """Enumerate GigE and USB cameras and fill the device combobox."""
        global deviceList
        global obj_cam_operation
        deviceList = MV_CC_DEVICE_INFO_LIST()
        tlayerType = MV_GIGE_DEVICE | MV_USB_DEVICE
        ret = MvCamera.MV_CC_EnumDevices(tlayerType, deviceList)
        if ret != 0:
            tkinter.messagebox.showerror('show error', 'enum devices fail! ret = ' + ToHexStr(ret))

        # Show how many cameras were found.
        text_number_of_devices.delete(1.0, tk.END)
        text_number_of_devices.insert(1.0, str(deviceList.nDeviceNum) + 'Cameras')

        if deviceList.nDeviceNum == 0:
            tkinter.messagebox.showinfo('show info', 'find no device!')

        print("Find %d devices!" % deviceList.nDeviceNum)

        devList = []
        for i in range(0, deviceList.nDeviceNum):
            mvcc_dev_info = cast(deviceList.pDeviceInfo[i], POINTER(MV_CC_DEVICE_INFO)).contents
            if mvcc_dev_info.nTLayerType == MV_GIGE_DEVICE:
                print("\ngige device: [%d]" % i)
                strModeName = ""
                # Stop at the NUL terminator, consistent with the USB branch
                # below (the original appended trailing '\x00' bytes here).
                for per in mvcc_dev_info.SpecialInfo.stGigEInfo.chModelName:
                    if per == 0:
                        break
                    strModeName = strModeName + chr(per)
                print("device model name: %s" % strModeName)

                # Unpack the 32-bit IPv4 address into dotted-quad form.
                nip1 = ((mvcc_dev_info.SpecialInfo.stGigEInfo.nCurrentIp & 0xff000000) >> 24)
                nip2 = ((mvcc_dev_info.SpecialInfo.stGigEInfo.nCurrentIp & 0x00ff0000) >> 16)
                nip3 = ((mvcc_dev_info.SpecialInfo.stGigEInfo.nCurrentIp & 0x0000ff00) >> 8)
                nip4 = (mvcc_dev_info.SpecialInfo.stGigEInfo.nCurrentIp & 0x000000ff)
                print("current ip: %d.%d.%d.%d\n" % (nip1, nip2, nip3, nip4))
                devList.append(
                    "Gige[" + str(i) + "]:" + str(nip1) + "." + str(nip2) + "." + str(nip3) + "." + str(nip4))
            elif mvcc_dev_info.nTLayerType == MV_USB_DEVICE:
                print("\nu3v device: [%d]" % i)
                strModeName = ""
                for per in mvcc_dev_info.SpecialInfo.stUsb3VInfo.chModelName:
                    if per == 0:
                        break
                    strModeName = strModeName + chr(per)
                print("device model name: %s" % strModeName)

                strSerialNumber = ""
                for per in mvcc_dev_info.SpecialInfo.stUsb3VInfo.chSerialNumber:
                    if per == 0:
                        break
                    strSerialNumber = strSerialNumber + chr(per)
                print("user serial number: %s" % strSerialNumber)
                devList.append("USB[" + str(i) + "]" + str(strSerialNumber))
        device_list["value"] = devList
        device_list.current(0)

        # ch:打开相机 | en:open device


    def open_device():
        """Open the selected camera unless one is already running."""
        global deviceList
        global nSelCamIndex
        global obj_cam_operation
        global b_is_run
        if b_is_run:
            tkinter.messagebox.showinfo('show info', 'Camera is Running!')
            return
        # Instantiate the camera-operation wrapper and try to open the device.
        obj_cam_operation = CameraOperation(cam, deviceList, nSelCamIndex)
        ret = obj_cam_operation.Open_device()
        if ret == 0:
            model_val.set('continuous')
            b_is_run = True
        else:
            b_is_run = False


    # ch:开始取流 | en:Start grab image
    def start_grabbing():
        """Start streaming frames from the opened camera."""
        # Read-only access to the module-level wrapper; no `global` needed.
        obj_cam_operation.Start_grabbing()


    # ch:停止取流 | en:Stop grab image
    def stop_grabbing():
        """Stop streaming frames from the camera."""
        # Read-only access to the module-level wrapper; no `global` needed.
        obj_cam_operation.Stop_grabbing()

        # ch:关闭设备 | Close device


    def close_device():
        """Close the camera and clear the running flag."""
        global b_is_run
        obj_cam_operation.Close_device()
        b_is_run = False

        # ch:设置触发模式 | en:set trigger mode


    def set_triggermode():
        """Apply the trigger mode chosen in the GUI to the camera."""
        obj_cam_operation.Set_trigger_mode(model_val.get())


    # ch:设置触发命令 | en:set trigger software
    def trigger_once():
        """Fire a single software trigger using the checkbox value as command."""
        obj_cam_operation.Trigger_once(triggercheck_val.get())


    # ch:保存bmp图片 | en:save bmp image
    def bmp_save():
        """Ask the grab thread to save a snapshot, then preview it in the GUI.

        NOTE(review): despite the name this sets b_save_jpg (same flag as
        jpg_save) and previews a hard-coded .jpg file -- presumably the grab
        thread only writes JPEG; confirm against CamOperation_class.
        """
        global obj_cam_operation
        obj_cam_operation.b_save_jpg = True
        from PIL import Image, ImageTk
        # Open and display the snapshot.
        file_path = "C:/Users/15068/Desktop/123.jpg"
        width = 350
        height = 350
        # resize() forces the pixel data to load, so the file handle can be
        # closed right away (the original leaked it). LANCZOS replaces
        # ANTIALIAS, which Pillow 10 removed (they were aliases before).
        with open(file_path, 'rb') as file_obj:
            image = Image.open(file_obj)
            image = image.resize((width, height), Image.LANCZOS)
        photo = ImageTk.PhotoImage(image)
        text_frame_text.config(image=photo)
        text_frame_text.image = photo  # keep a reference so Tk doesn't GC it
    # ch:保存jpg图片 | en:save jpg image
    def jpg_save():
        """Ask the grab thread to save a JPG snapshot, then preview it.

        NOTE(review): duplicate of bmp_save -- consider merging the two
        callbacks once the snapshot format question is settled.
        """
        global obj_cam_operation
        obj_cam_operation.b_save_jpg = True
        from PIL import Image, ImageTk
        # Open and display the snapshot.
        file_path = "C:/Users/15068/Desktop/123.jpg"
        width = 350
        height = 350
        # resize() forces the pixel data to load, so the file handle can be
        # closed right away (the original leaked it). LANCZOS replaces
        # ANTIALIAS, which Pillow 10 removed (they were aliases before).
        with open(file_path, 'rb') as file_obj:
            image = Image.open(file_obj)
            image = image.resize((width, height), Image.LANCZOS)
        photo = ImageTk.PhotoImage(image)
        text_frame_text.config(image=photo)
        text_frame_text.image = photo  # keep a reference so Tk doesn't GC it
    def get_parameter():
        # Read the current camera parameters and show them in the text boxes.
        global obj_cam_operation
        obj_cam_operation.Get_parameter()
        text_frame_rate.delete(1.0, tk.END)
        text_frame_rate.insert(1.0, obj_cam_operation.frame_rate)
        text_exposure_time.delete(1.0, tk.END)
        text_exposure_time.insert(1.0, obj_cam_operation.exposure_time)
        text_gain.delete(1.0, tk.END)
        text_gain.insert(1.0, obj_cam_operation.gain)
        # NOTE(review): calling window.mainloop() inside a button callback
        # starts a *nested* Tk event loop on every press -- this looks
        # unintentional; confirm before removing.
        window.mainloop()

    def set_parameter():
        """Push the values typed into the text boxes down to the camera."""
        global obj_cam_operation
        # Tk Text widgets append a trailing newline; strip it before use.
        obj_cam_operation.exposure_time = text_exposure_time.get(1.0, tk.END).rstrip("\n")
        obj_cam_operation.gain = text_gain.get(1.0, tk.END).rstrip("\n")
        obj_cam_operation.frame_rate = text_frame_rate.get(1.0, tk.END).rstrip("\n")
        obj_cam_operation.Set_parameter(obj_cam_operation.frame_rate, obj_cam_operation.exposure_time,
                                        obj_cam_operation.gain)


    def connect_robot_func():
        # Connect to the UR robot controller; address/port come from the
        # config module (kept in sync by the on_text*_change callbacks).
        sock.connect((config.robot_ip, config.robot_port))

    def run_robot_func():
        # Execute the robot motion script.
        # SECURITY NOTE(review): exec() runs arbitrary code from test_012.py
        # with full privileges -- acceptable only because the file is locally
        # authored; never point this at untrusted input.
        with open('test_012.py', 'r', encoding='utf-8') as file:
            run_robot_code = file.read()
        exec(run_robot_code)
# Input handlers for the robot IP and PORT text fields
    def on_text1_change(*args):
        """Trace callback: keep config.robot_ip in sync with the IP entry."""
        config.robot_ip = text_var1.get()

    def on_text2_change(*args):
        """Trace callback: keep config.robot_port in sync with the port entry."""
        config.robot_port = int(text_var2.get())

    def run_yolo_func():
        import argparse
        import os
        import time
        from pathlib import Path
        import math
        import cv2
        import torch
        import torch.backends.cudnn as cudnn
        from numpy import random

        from models.experimental import attempt_load
        from utils.datasets import LoadStreams, LoadImages
        from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, \
            apply_classifier, \
            scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path
        from utils.plots import plot_one_box
        from utils.torch_utils import select_device, load_classifier, time_synchronized
        parser = argparse.ArgumentParser()
        opt = parser.parse_args()



        class Solution:
            """Geometry helpers used to score and order detected screw points."""

            ANGLE_WEIGHTS = 10
            DISTANCE_WEIGHTS = 100

            def caculate_weights(self, p1, p2, center_p):
                """Score combining p1's distance to the center and the angle at
                center_p between p1 and p2 (name keeps the original spelling
                because subclasses call it)."""
                return (self.DISTANCE_WEIGHTS * self.get_distance(p1, center_p)
                        + self.ANGLE_WEIGHTS * self.get_angle(p1, p2, center_p))

            def get_distance(self, p1, p2):
                """Euclidean distance between p1 and p2; values <= 0.01 clamp to 0."""
                dx = p1[0] - p2[0]
                dy = p1[1] - p2[1]
                d = math.sqrt(dx * dx + dy * dy)
                return d if d > 0.01 else 0

            def get_angle(self, p1, p2, center_p):
                """Angle in degrees at center_p between the rays to p1 and p2."""
                ax = center_p[0] - p1[0]
                ay = center_p[1] - p1[1]
                bx = center_p[0] - p2[0]
                by = center_p[1] - p2[1]

                dot = ax * bx + ay * by
                mag_a = math.sqrt(ax * ax + ay * ay)
                mag_b = math.sqrt(bx * bx + by * by)

                # Clamp into acos's [-1, 1] domain to guard against rounding drift.
                cos_theta = min(1, max(-1, dot / (mag_a * mag_b)))
                return math.acos(cos_theta) * 180.0 / math.pi
        class SolutionOrdered(Solution):
            """Solution variant that orders the big screws for the robot path."""

            def the_big_turns(self, big, center_point):
                """Return the big-screw points in visiting order.

                The point nearest to center_point is removed first and appended
                last; the remaining points are walked greedily, always jumping
                to the unvisited point with the highest caculate_weights()
                score relative to the current one.

                NOTE: mutates `big` in place (removes the nearest point),
                matching the original behavior.
                """
                if not big:
                    # Original crashed on an empty list; nothing to order.
                    return []

                # Distance computed once per point (original recomputed it up
                # to three times per point inside the loop).
                distances = [self.get_distance(p, center_point) for p in big]
                min_index = distances.index(min(distances))
                nearest = big[min_index]
                del big[min_index]

                if not big:
                    # Original raised IndexError for a single input point.
                    return [nearest]

                # Pairwise transition scores between the remaining points.
                table = [[self.caculate_weights(a, b, center_point) for b in big]
                         for a in big]

                # Greedy walk starting at index 0.
                visited = [0]
                current = 0
                nxt = 0
                while len(visited) < len(big):
                    max_weights = 0
                    for i in range(len(big)):
                        if table[current][i] > max_weights and i not in visited:
                            nxt = i
                            max_weights = table[current][i]
                    visited.append(nxt)
                    current = nxt

                ret = [big[i] for i in visited]
                ret.append(nearest)  # the center-most screw is handled last
                return ret


        def detect(save_img=False):
            """Run YOLOv5 inference on ``opt.source`` and record detections.

            Annotated images/videos are written under an auto-incremented
            ``save_dir``.  When ``opt.save_txt`` is set, each detection is
            appended as a normalized ``(cls, x, y, w, h[, conf])`` tuple to
            the module-level ``content`` list and to a per-image ``.txt``
            label file.  The path of the last annotated image is published
            via ``config.path`` for the GUI/drawing step.

            Args:
                save_img: Initial flag for saving annotated output; it is
                    immediately recomputed from ``opt.nosave`` / ``opt.source``.
            """
            global content
            print(os.getcwd())
            print(os.environ)
            source, weights, view_img, save_txt, imgsz = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size
            save_img = not opt.nosave and not source.endswith('.txt')  # save inference images
            # Webcam/stream sources: numeric device ids, .txt URL lists, or
            # network stream protocols.
            webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(
                ('rtsp://', 'rtmp://', 'http://', 'https://'))

            # Directories
            save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))  # increment run
            (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir

            # Initialize
            set_logging()
            device = select_device(opt.device)
            half = device.type != 'cpu'  # half precision only supported on CUDA

            # Load model
            model = attempt_load(weights, map_location=device)  # load FP32 model
            stride = int(model.stride.max())  # model stride
            imgsz = check_img_size(imgsz, s=stride)  # check img_size
            if half:
                model.half()  # to FP16

            # Second-stage classifier (disabled; kept for parity with upstream)
            classify = False
            if classify:
                modelc = load_classifier(name='resnet101', n=2)  # initialize
                # Fix: load_state_dict() returns a result object, not the
                # module, so .to()/.eval() must be called on modelc itself
                # (chaining them off load_state_dict raises AttributeError).
                modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model'])
                modelc.to(device).eval()

            # Set Dataloader
            vid_path, vid_writer = None, None
            if webcam:
                view_img = check_imshow()
                cudnn.benchmark = True  # set True to speed up constant image size inference
                dataset = LoadStreams(source, img_size=imgsz, stride=stride)
            else:
                dataset = LoadImages(source, img_size=imgsz, stride=stride)

            # Get names and colors
            names = model.module.names if hasattr(model, 'module') else model.names
            colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]

            # Run inference
            if device.type != 'cpu':
                model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters())))  # run once
            t0 = time.time()
            for path, img, im0s, vid_cap in dataset:
                img = torch.from_numpy(img).to(device)
                img = img.half() if half else img.float()  # uint8 to fp16/32
                img /= 255.0  # 0 - 255 to 0.0 - 1.0
                if img.ndimension() == 3:
                    img = img.unsqueeze(0)

                # Inference
                t1 = time_synchronized()
                pred = model(img, augment=opt.augment)[0]

                # Apply NMS.  NOTE: the confidence threshold is hard-coded to
                # 0.7 here, overriding opt.conf_thres (see trailing comment).
                pred = non_max_suppression(pred, 0.7, opt.iou_thres, classes=opt.classes,
                                           agnostic=opt.agnostic_nms)  # opt.conf_thres
                t2 = time_synchronized()

                # Apply Classifier
                if classify:
                    pred = apply_classifier(pred, modelc, img, im0s)

                # Process detections
                for i, det in enumerate(pred):  # detections per image
                    if webcam:  # batch_size >= 1
                        p, s, im0, frame = path[i], '%g: ' % i, im0s[i].copy(), dataset.count
                    else:
                        p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0)

                    p = Path(p)  # to Path
                    save_path = str(save_dir / p.name)  # img.jpg
                    # Publish the annotated image path for later drawing.
                    # NOTE(review): `config` is defined elsewhere in this file.
                    config.path = save_path
                    txt_path = str(save_dir / 'labels' / p.stem) + (
                        '' if dataset.mode == 'image' else f'_{frame}')  # img.txt
                    s += '%gx%g ' % img.shape[2:]  # print string
                    gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
                    if len(det):
                        # Rescale boxes from img_size to im0 size
                        det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()

                        # Print results
                        for c in det[:, -1].unique():
                            n = (det[:, -1] == c).sum()  # detections per class
                            s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string

                        # Write results
                        for *xyxy, conf, cls in reversed(det):
                            if save_txt:  # Write to file
                                xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(
                                    -1).tolist()  # normalized xywh
                                cls_id = int(cls)
                                line = (cls_id, *xywh, conf) if opt.save_conf else (cls_id, *xywh)  # label format
                                # Collect for post-processing and append to the
                                # per-image label file.
                                content.append(line)
                                with open(txt_path + '.txt', 'a') as f:
                                    f.write(('%g ' * len(line)).rstrip() % line + '\n')

                            if save_img or view_img:  # Add bbox to image
                                label = f'{names[int(cls)]} {conf:.2f}'
                                plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3)

                    # Print time (inference + NMS)
                    print(f'{s}Done. ({t2 - t1:.3f}s)')

                    # Stream results
                    if view_img:
                        cv2.imshow(str(p), im0)
                        cv2.waitKey(1)  # 1 millisecond

                    # Save results (image with detections)
                    if save_img:
                        if dataset.mode == 'image':
                            cv2.imwrite(save_path, im0)
                        else:  # 'video' or 'stream'
                            if vid_path != save_path:  # new video
                                vid_path = save_path
                                if isinstance(vid_writer, cv2.VideoWriter):
                                    vid_writer.release()  # release previous video writer
                                if vid_cap:  # video
                                    fps = vid_cap.get(cv2.CAP_PROP_FPS)
                                    w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                                    h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                                else:  # stream
                                    fps, w, h = 30, im0.shape[1], im0.shape[0]
                                    save_path += '.mp4'
                                vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
                            vid_writer.write(im0)

            if save_txt or save_img:
                s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
                print(f"Results saved to {save_dir}{s}")

            print(f'Done. ({time.time() - t0:.3f}s)')

        # --- CLI options and detection driver -------------------------------
        # NOTE(review): detect() appends to a module-level `content` list, but
        # its initialization is commented out below — confirm `content` is
        # defined earlier in this file.
        #if __name__ == '__main__':
        #content=[]
        parser = argparse.ArgumentParser()
        parser.add_argument('--weights', nargs='+', type=str, default=r'runs\train\exp12\weights\best.pt',
                            help='model.pt path(s)')
        parser.add_argument('--source', type=str, default=r'C:\Users\15068\Desktop\123.jpg',
                            help='source')  # file/folder, 0 for webcam
        parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
        # NOTE: detect() hard-codes 0.7 as the NMS confidence threshold, so
        # --conf-thres currently has no effect.
        parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
        parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
        parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
        parser.add_argument('--view-img', action='store_true', help='display results')
        # `default=True` with store_true makes --save-txt effectively always on.
        parser.add_argument('--save-txt', default=True, action='store_true', help='save results to *.txt')
        parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
        parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
        parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
        parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
        parser.add_argument('--augment', action='store_true', help='augmented inference')
        parser.add_argument('--update', action='store_true', help='update all models')
        parser.add_argument('--project', default='runs/detect', help='save results to project/name')
        parser.add_argument('--name', default='exp', help='save results to project/name')
        parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
        # Rebinds the module-level `opt` consumed by detect().
        opt = parser.parse_args()
        #     print(opt)
        check_requirements(exclude=('pycocotools', 'thop'))

        # Run inference without building autograd graphs.
        with torch.no_grad():
            if opt.update:  # update all models (to fix SourceChangeWarning)
                for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']:
                    detect()
                    strip_optimizer(opt.weights)
            else:
                detect()
        # --- Post-process detections into ordered pick points ---------------
        # `content` holds the (cls, x, y, w, h[, conf]) tuples collected by
        # detect(); convert each to a plain list for positional access.
        list_content = [list(item) for item in content]

        # Aspect-ratio correction applied to normalized y so that distances
        # and angles are computed in a square coordinate frame.
        # Fix: the original code scaled the center point by 1.6625 but every
        # other point (and the inverse transform below) by 1.66255; a single
        # constant is now used throughout.
        Y_SCALE = 1.66255

        # Split detections by class id: 0 = center, 1 = big, 2 = mid, 3 = small.
        center_point = []
        big_points = []
        mid_points = []
        small_points = []
        for item in list_content:
            cls_id, x, y = item[0], item[1], item[2]
            if cls_id == 0:
                center_point.extend([x, y])
            elif cls_id == 1:
                big_points.append([x, y])
            elif cls_id == 2:
                mid_points.append([x, y])
            elif cls_id == 3:
                small_points.append([x, y])
        big_count = len(big_points)
        mid_count = len(mid_points)
        small_count = len(small_points)

        # Assumes exactly one class-0 (center) detection was found; raises
        # IndexError otherwise, as the ordering below needs a center point.
        center_point[1] = center_point[1] * Y_SCALE

        # Apply the same y-scaling to every detected point.
        big_points1 = [(p[0], p[1] * Y_SCALE) for p in big_points]
        mid_points1 = [(p[0], p[1] * Y_SCALE) for p in mid_points]
        small_points1 = [(p[0], p[1] * Y_SCALE) for p in small_points]

        # Sort each group right-to-left by x before the greedy ordering pass.
        sorted_big = sorted(big_points1, key=lambda p: p[0], reverse=True)
        sorted_mid = sorted(mid_points1, key=lambda p: p[0], reverse=True)
        sorted_small = sorted(small_points1, key=lambda p: p[0], reverse=True)

        # Order each non-empty group around the center point, big parts first.
        solution = SolutionOrdered()
        res = []
        for group in (sorted_big, sorted_mid, sorted_small):
            if group:
                res = res + solution.the_big_turns(group, center_point)
        print(res)

        # Persist per-class counts for the GUI's Num display.
        filename_num = 'num.txt'
        content_num = 'big:{}\nmid:{}\nsmall:{}'.format(big_count, mid_count, small_count)
        with open(filename_num, 'w') as file:
            file.write(content_num)

        # Persist the ordered points (y mapped back to image coordinates).
        filename_order = 'order.txt'
        result = [[item[0], item[1] / Y_SCALE] for item in res]
        content_order = result
        with open(filename_order, 'w') as file:
            file.write(str(content_order))

        # Draw the ordered points onto the last annotated image.
        draw_and_save_points_on_image(config.path, filename_order, r'C:\Users\15068\Desktop\456.jpg')
    # GUI layout code.
    # NOTE(review): `window`, `tk`/`tkinter`/`ttk`, `config`, and every button
    # callback (xFunc, connect_robot_func, enum_devices, ...) are defined
    # elsewhere in this file.
    #window = tk.Tk()
    window.title('BasicDemo')
    window.geometry('1000x550')
    model_val = tk.StringVar()
    global triggercheck_val
    triggercheck_val = tk.IntVar()

    # Device discovery: count display plus a selectable device combobox.
    text_number_of_devices = tk.Text(window, width=10, height=1)
    text_number_of_devices.place(x=200, y=20)
    xVariable = tkinter.StringVar()
    device_list = ttk.Combobox(window, textvariable=xVariable, width=20)
    device_list.place(x=20, y=20)
    device_list.bind("<<ComboboxSelected>>", xFunc)

    # Camera parameter entry fields.
    label_exposure_time = tk.Label(window, text='Exposure Time', width=15, height=1)
    label_exposure_time.place(x=20, y=350)
    text_exposure_time = tk.Text(window, width=15, height=1)
    text_exposure_time.place(x=160, y=350)

    label_gain = tk.Label(window, text='Gain', width=15, height=1)
    label_gain.place(x=20, y=400)
    text_gain = tk.Text(window, width=15, height=1)
    text_gain.place(x=160, y=400)

    label_frame_rate = tk.Label(window, text='Frame Rate', width=15, height=1)
    label_frame_rate.place(x=20, y=450)
    text_frame_rate = tk.Text(window, width=15, height=1)
    text_frame_rate.place(x=160, y=450)


    # # (disabled) JPG image preview area
    # label_frame_rate = tk.Label(window, text='Picture', width=15, height=1)
    # label_frame_rate.place(x=265, y=20)
    # text_frame_rate = tk.Text(window, width=50, height=30)
    # text_frame_rate.place(x=300, y=50)

    # Robot connection input fields.
    # NOTE: the names label_gain / btn_enum_devices are reused below; only the
    # last assignment keeps a Python reference (Tk keeps the widgets alive).
    label_gain = tk.Label(window, text='Robot IP', width=15, height=1)
    label_gain.place(x=700, y=50)
    text_var1 = tk.StringVar()
    text_gain_ip = tk.Entry(window, width=15, textvariable=text_var1)
    text_gain_ip.place(x=840, y=50)
    text_var1.trace_add("write", on_text1_change)

    label_gain = tk.Label(window, text='Robot Port', width=15, height=1)
    label_gain.place(x=700, y=100)
    text_var2 = tk.StringVar()
    text_gain_port = tk.Entry(window, width=15, textvariable=text_var2)
    text_gain_port.place(x=840, y=100)
    text_var2.trace_add("write", on_text2_change)
    # Output display: per-class counts read from num.txt.
    label_gain = tk.Label(window, text='Num', width=15, height=1)
    label_gain.place(x=700, y=140)
    output_gain1 = tk.Label(window, width=15, height=3, relief="sunken")
    output_gain1.place(x=840, y=140)
    output_gain1.config(text="Output Text")
    file_path1 = 'num.txt'
    def update_output_gain1():
        """Refresh the Num label from num.txt, re-polling every second."""
        # Open the file and read its contents
        with open(file_path1, 'r') as file:
            file_content1 = file.read()
        output_gain1.config(text=file_content1)
        # Re-check the file for updates after a short delay (1 s)
        window.after(1000, update_output_gain1)
    update_output_gain1()

    # (disabled) Order display: ordered points read from order.txt.
    # file_path2 = 'order.txt'
    # label_gain = tk.Label(window, text='Order', width=15, height=1)
    # label_gain.place(x=700, y=210)
    # output_gain2 = tk.Label(window, width=15, height=7, relief="sunken")
    # output_gain2.place(x=840, y=210)
    # output_gain2.config(text="Output Text")
    # def update_output_gain2():
    #     # Open the file and read its contents
    #     with open(file_path2, 'r') as file:
    #         file_content2 = file.read()
    #     output_gain2.config(text=file_content2)
    #     # Re-check the file for updates periodically
    #     window.after(1000, update_output_gain2)
    # update_output_gain2()

    # Robot control buttons.
    btn_enum_devices = tk.Button(window, text='Connect Robot', width=35, height=1, command=connect_robot_func)
    btn_enum_devices.place(x=700, y=350)

    btn_enum_devices = tk.Button(window, text='Run Yolo', width=35, height=1, command=run_yolo_func)
    btn_enum_devices.place(x=700, y=400)
    btn_enum_devices = tk.Button(window, text='Run Robot', width=35, height=1, command=run_robot_func)
    btn_enum_devices.place(x=700, y=450)
    # NOTE(review): 'Disconnect Robot' is wired to enum_devices (same as the
    # 'Enum Devices' button) — confirm this is not a placeholder callback.
    btn_enum_devices = tk.Button(window, text='Disconnect Robot', width=35, height=1, command=enum_devices)
    btn_enum_devices.place(x=700, y=500)

    # Camera control buttons.
    btn_enum_devices = tk.Button(window, text='Enum Devices', width=35, height=1, command=enum_devices)
    btn_enum_devices.place(x=20, y=50)
    btn_open_device = tk.Button(window, text='Open Device', width=15, height=1, command=open_device)
    btn_open_device.place(x=20, y=100)
    btn_close_device = tk.Button(window, text='Close Device', width=15, height=1, command=close_device)
    btn_close_device.place(x=160, y=100)

    # Acquisition mode selection: continuous vs. hardware/software trigger.
    radio_continuous = tk.Radiobutton(window, text='Continuous', variable=model_val, value='continuous', width=15,
                                      height=1, command=set_triggermode)
    radio_continuous.place(x=20, y=150)
    radio_trigger = tk.Radiobutton(window, text='Trigger Mode', variable=model_val, value='triggermode', width=15,
                                   height=1, command=set_triggermode)
    radio_trigger.place(x=160, y=150)

    btn_start_grabbing = tk.Button(window, text='Start Grabbing', width=15, height=1, command=start_grabbing)
    btn_start_grabbing.place(x=20, y=200)
    btn_stop_grabbing = tk.Button(window, text='Stop Grabbing', width=15, height=1, command=stop_grabbing)
    btn_stop_grabbing.place(x=160, y=200)

    checkbtn_trigger_software = tk.Checkbutton(window, text='Tigger by Software', variable=triggercheck_val, onvalue=1,
                                               offvalue=0)
    checkbtn_trigger_software.place(x=20, y=250)
    btn_trigger_once = tk.Button(window, text='Trigger Once', width=15, height=1, command=trigger_once)
    btn_trigger_once.place(x=160, y=250)

    # Snapshot save buttons.
    btn_save_bmp = tk.Button(window, text='Save as BMP', width=15, height=1, command=bmp_save)
    btn_save_bmp.place(x=20, y=300)
    btn_save_jpg = tk.Button(window, text='Save as JPG', width=15, height=1, command=jpg_save)
    btn_save_jpg.place(x=160, y=300)

    # Parameter get/set buttons.
    btn_get_parameter = tk.Button(window, text='Get Parameter', width=15, height=1, command=get_parameter)
    btn_get_parameter.place(x=20, y=500)
    btn_set_parameter = tk.Button(window, text='Set Parameter', width=15, height=1, command=set_parameter)
    btn_set_parameter.place(x=160, y=500)
    # Enter the Tk event loop; blocks until the window is closed.
    window.mainloop()

