#https://blog.csdn.net/ldz1221/article/details/137476846
#https://blog.csdn.net/weixin_48994268/article/details/115282688?spm=1001.2101.3001.6650.13&utm_medium=distribute.pc_relevant.none-task-blog-2%7Edefault%7Ebaidujs_baidulandingword%7ECtr-13-115282688-blog-129312409.235%5Ev43%5Epc_blog_bottom_relevance_base6&depth_1-utm_source=distribute.pc_relevant.none-task-blog-2%7Edefault%7Ebaidujs_baidulandingword%7ECtr-13-115282688-blog-129312409.235%5Ev43%5Epc_blog_bottom_relevance_base6&utm_relevant_index=23

#https://devpress.csdn.net/shanghai/68ad1ef5080e555a88de034b.html?spm=1001.2101.3001.6650.2&utm_medium=distribute.pc_relevant.none-task-blog-2%7Edefault%7Ebaidujs_baidulandingword%7Eactivity-2-121783848-blog-115282688.235%5Ev43%5Epc_blog_bottom_relevance_base6&depth_1-utm_source=distribute.pc_relevant.none-task-blog-2%7Edefault%7Ebaidujs_baidulandingword%7Eactivity-2-121783848-blog-115282688.235%5Ev43%5Epc_blog_bottom_relevance_base6&utm_relevant_index=5


import os
import random
import shutil
import xml.etree.ElementTree as ET
 
 
# Detector class list: the index of each name here becomes the YOLO class id
# written into the txt label files below.
classes = ["dog","cat", "elephant","giraffe", "horse","bird"]   # label classes
 
# Directory containing this script; used to resolve the dataset paths.
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
 
# Convert an absolute-pixel bounding box to normalized YOLO coordinates.
def convert(size, box):
    """Normalize *box* = (x_min, x_max, y_min, y_max) by *size* = (width, height).

    Returns (center_x, center_y, width, height), each scaled to [0, 1].
    """
    img_w, img_h = size
    x_min, x_max, y_min, y_max = box
    scale_x = 1. / img_w
    scale_y = 1. / img_h
    center_x = (x_min + x_max) / 2.0 * scale_x
    center_y = (y_min + y_max) / 2.0 * scale_y
    box_w = (x_max - x_min) * scale_x
    box_h = (y_max - y_min) * scale_y
    return (center_x, center_y, box_w, box_h)
 
# Read one Pascal-VOC XML annotation, convert it to YOLO format, write a txt file.
def convert_annotation(image_id):
    """Read ./xml_dataset/<image_id>.xml and write ./txt_dataset/<image_id>.txt.

    Each <object> whose class name is in ``classes`` becomes one line:
    ``<class_id> <cx> <cy> <w> <h>`` with coordinates normalized by image size.
    Objects with unknown class names are skipped.
    """
    # Use context managers so both files are closed even if parsing raises
    # (the original opened them and never closed either handle).
    with open('./xml_dataset/%s.xml' % (image_id), encoding='UTF-8') as in_file, \
         open('./txt_dataset/%s.txt' % (image_id), 'w') as out_file:
        tree = ET.parse(in_file)
        root = tree.getroot()
        size = root.find('size')
        w = int(size.find('width').text)
        h = int(size.find('height').text)

        for obj in root.iter('object'):
            cls = obj.find('name').text
            if cls not in classes:
                continue  # unknown label: not part of this model's class set
            cls_id = classes.index(cls)
            xmlbox = obj.find('bndbox')
            # Note the (xmin, xmax, ymin, ymax) order expected by convert().
            b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text),
                 float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text))
            bb = convert((w, h), b)
            out_file.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n')
 
# Convert every XML annotation in ./xml_dataset to a YOLO txt label file.
xml_path = os.path.join(CURRENT_DIR, './xml_dataset/')  # XML directory, relative to this script
img_xmls = os.listdir(xml_path)
for img_xml in img_xmls:
    if not img_xml.endswith('.xml'):
        continue  # ignore stray non-XML entries (hidden files, etc.)
    # splitext keeps dots inside the base name ('a.b.xml' -> 'a.b');
    # the original split('.')[0] would truncate such names.
    label_name = os.path.splitext(img_xml)[0]
    print(label_name)
    convert_annotation(label_name)
 
# Split images (and their matching YOLO txt labels) into train/test directories.
def split_dataset(image_dataset_dir, text_dataset_dir, train_image_dir, train_text_dir, test_image_dir, test_text_dir,
                  split_ratio=0.8):
    """Randomly split the images in *image_dataset_dir* and copy them, plus any
    matching ``<basename>.txt`` label from *text_dataset_dir*, into the train
    and test destination directories.

    Parameters
    ----------
    image_dataset_dir : str   source image directory
    text_dataset_dir : str    source label directory (same base filenames)
    train_image_dir, train_text_dir : str   training-split destinations
    test_image_dir, test_text_dir : str     test-split destinations
    split_ratio : float       fraction of images assigned to the training split

    Images without a label file are still copied but produce no label.
    """
    # Create every destination directory; text_dataset_dir is also created to
    # match the original behavior.  exist_ok avoids the exists/makedirs races.
    for directory in (train_image_dir, train_text_dir, test_image_dir,
                      test_text_dir, text_dataset_dir):
        os.makedirs(directory, exist_ok=True)

    image_file_list = os.listdir(image_dataset_dir)

    # Shuffle so the split is random.
    random.shuffle(image_file_list)

    num_train_samples = int(len(image_file_list) * split_ratio)
    train_files = image_file_list[:num_train_samples]
    test_files = image_file_list[num_train_samples:]

    def _copy_pair(file_name, image_dir, text_dir):
        # Copy one image and, when present, its matching .txt label.
        shutil.copyfile(os.path.join(image_dataset_dir, file_name),
                        os.path.join(image_dir, file_name))
        label_name = os.path.splitext(file_name)[0] + ".txt"
        src_text_path = os.path.join(text_dataset_dir, label_name)
        if os.path.exists(src_text_path):
            shutil.copyfile(src_text_path, os.path.join(text_dir, label_name))

    # The original re-sliced the train list inside a loop (O(n^2) membership
    # tests); iterating each split once keeps image/label pairing and is O(n).
    for file_name in train_files:
        _copy_pair(file_name, train_image_dir, train_text_dir)
    for file_name in test_files:
        _copy_pair(file_name, test_image_dir, test_text_dir)
 
# Dataset directory layout (all relative to the working directory)
xml_dataset_dir = "xml_dataset"      # Pascal-VOC XML annotations
text_dataset_dir = "txt_dataset"    # YOLO txt labels produced above
image_dataset_dir = "image_dataset"  # source images
train_image_dir = "images/train"     # training-split images
test_image_dir = "images/val"        # validation-split images
train_text_dir = "labels/train"      # training-split txt labels
test_text_dir = "labels/val"         # validation-split txt labels
 
# Split images/labels into train and validation sets (default ratio 0.8).
split_dataset(image_dataset_dir, text_dataset_dir, train_image_dir, train_text_dir, test_image_dir, test_text_dir)






##########################################################################################################
# Inference section (standalone ONNX Runtime + OpenCV script pasted into this file)
import cv2
import numpy as np
import onnxruntime
 
CLASSES = ["dog","cat", "elephant","giraffe", "horse","bird"]  # complete this class list for your model
# NOTE(review): order should match the label order used at training time (coco128-style yaml)
 
class YOLOV5():
    """Minimal ONNX Runtime wrapper around a YOLOv5 detection model."""

    def __init__(self, onnxpath):
        self.onnx_session = onnxruntime.InferenceSession(onnxpath)
        self.input_name = self.get_input_name()
        self.output_name = self.get_output_name()

    def get_input_name(self):
        # Names of every graph input.
        return [node.name for node in self.onnx_session.get_inputs()]

    def get_output_name(self):
        # Names of every graph output.
        return [node.name for node in self.onnx_session.get_outputs()]

    def get_input_feed(self, img_tensor):
        # Feed the same tensor to every declared input (YOLOv5 exports have one).
        return {name: img_tensor for name in self.input_name}

    def inference(self, image):
        """Run the network on a BGR image.

        Returns (raw predictions, the 640x640 resized image used as input).
        """
        or_img = cv2.resize(image, (640, 640))  # fixed network input size
        # BGR -> RGB, then HWC -> CHW
        tensor = or_img[:, :, ::-1].transpose(2, 0, 1)
        tensor = tensor.astype(dtype=np.float32)
        tensor /= 255.0  # scale pixel values to [0, 1]
        tensor = np.expand_dims(tensor, axis=0)  # add batch dimension
        pred = self.onnx_session.run(None, self.get_input_feed(tensor))[0]
        return pred, or_img
 
def xywh2xyxy(x):
    """Convert box columns 0-3 from (cx, cy, w, h) to (x1, y1, x2, y2).

    *x* is an (N, >=4) array; extra columns are copied through unchanged and
    the input array itself is not modified.
    """
    y = np.copy(x)
    half_w = x[:, 2] / 2
    half_h = x[:, 3] / 2
    y[:, 0] = x[:, 0] - half_w  # left
    y[:, 1] = x[:, 1] - half_h  # top
    y[:, 2] = x[:, 0] + half_w  # right
    y[:, 3] = x[:, 1] + half_h  # bottom
    return y
 
 
def filter_box(org_box, conf_thres, iou_thres):
    """Filter raw YOLOv5 predictions by objectness and run per-class NMS.

    org_box: raw network output, squeezed to (N, 5 + num_classes) where
    columns are (cx, cy, w, h, obj_conf, class_scores...).
    Returns an array of kept boxes, each (x1, y1, x2, y2, obj_conf, class_id).
    """
    org_box = np.squeeze(org_box)
    # Keep only rows whose objectness exceeds the confidence threshold.
    conf = org_box[..., 4] > conf_thres
    box = org_box[conf == True]
    # Class scores start at column 5; argmax picks the predicted class per row.
    cls_cinf = box[..., 5:]
    cls = []
    for i in range(len(cls_cinf)):
        cls.append(int(np.argmax(cls_cinf[i])))
    all_cls = list(set(cls))
    output = []
    # NMS is applied independently per class.
    for i in range(len(all_cls)):
        curr_cls = all_cls[i]
        curr_cls_box = []
        curr_out_box = []
        for j in range(len(cls)):
            if cls[j] == curr_cls:
                # Overwrite column 5 (first class score) with the class id so
                # each kept row carries (x, y, w, h, obj_conf, class_id).
                box[j][5] = curr_cls
                curr_cls_box.append(box[j][:6])
        curr_cls_box = np.array(curr_cls_box)
        curr_cls_box = xywh2xyxy(curr_cls_box)
        boxes = curr_cls_box[:, :4]
        scores = curr_cls_box[:, 4]  # objectness used as the NMS score
        # NOTE(review): cv2.dnn.NMSBoxes documents boxes as (x, y, w, h) but
        # xyxy corners are passed here — common in copied YOLOv5 snippets;
        # confirm against your OpenCV version.
        indices = cv2.dnn.NMSBoxes(boxes.tolist(), scores.tolist(), conf_thres, iou_thres)
        # NOTE(review): older OpenCV returns indices as an (K, 1) array; each
        # idx may then be a 1-element array rather than an int — verify.
        for idx in indices:
            curr_out_box.append(curr_cls_box[idx])
        output.extend(curr_out_box)
    output = np.array(output)
    return output
 
 
def draw(image, box_data):
    """Draw detection boxes and labels on *image* in place.

    box_data rows are (x1, y1, x2, y2, score, class_id).  Boxes are labelled
    in English on the image; the class name is printed to the terminal in
    Chinese (falling back to English for unmapped labels).
    """
    # English -> Chinese label mapping (terminal output only).
    ENGLISH_TO_CHINESE = {
        'bird': '鸟',
        'elephant': '大象',
        'cat': '猫',
        'dog': '狗',
        'giraffe': '长颈鹿',
        'horse': '马',
        # extend for any additional labels
    }

    if len(box_data) == 0:
        print("没有检测到任何对象。")
        return

    boxes = box_data[..., :4].astype(np.int32)
    scores = box_data[..., 4]
    classes = box_data[..., 5].astype(np.int32)

    for box, score, cl in zip(boxes, scores, classes):
        # Corner coordinates (the original misleadingly named these
        # top/left/right/bottom, but they are x1, y1, x2, y2).
        x1, y1, x2, y2 = box

        label = CLASSES[cl]
        chinese_label = ENGLISH_TO_CHINESE.get(label, label)

        # Draw the box with its English label.
        cv2.rectangle(image, (x1, y1), (x2, y2), (255, 0, 0), 2)
        cv2.putText(image, '{0} {1:.2f}'.format(label, score),
                    (x1, y1),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.6, (0, 0, 255), 2)

        # Print the Chinese label to the terminal.
        print('类别: {}'.format(chinese_label))
 
 
def main():
    """Run the YOLOv5 ONNX detector on the default webcam until 'q' is pressed."""
    onnx_path = 'runs/train/exp/weights/best.onnx'
    detector = YOLOV5(onnx_path)
    capture = cv2.VideoCapture(0)  # default camera
    while True:
        ok, frame = capture.read()
        if not ok:
            break  # camera disconnected or no frame available
        predictions, display_img = detector.inference(frame)
        detections = filter_box(predictions, 0.5, 0.5)  # conf and IoU thresholds
        draw(display_img, detections)
        cv2.imshow('Video', display_img)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    capture.release()
    cv2.destroyAllWindows()

if __name__ == "__main__":
    main()
 