upadhyaysuraj committed
Commit: 57d8fc8
Parent(s): b462d54
Upload 11 files
- Cars418.png +0 -0
- annotation_format_checker.py +57 -0
- app.py +93 -0
- best.pt +3 -0
- data_preparation.py +54 -0
- label_format_converter_to_yolo.py +61 -0
- predict.py +52 -0
- requirements.txt +5 -0
- train.py +36 -0
- training_epoch_accuracy_visulizer.py +32 -0
- yaml_creator.py +29 -0
Cars418.png
ADDED
annotation_format_checker.py
ADDED
@@ -0,0 +1,57 @@
import cv2
import matplotlib.pyplot as plt
import os

def check_yolo_annotations(image_dir, label_dir):
    """
    Checks that the conversion from VOC to YOLO format is correct by plotting bounding boxes on an image.

    Parameters:
        image_dir (str): Directory path to the images.
        label_dir (str): Directory path to the YOLO format annotations.
    """
    # Get the first image file
    image_files = sorted(os.listdir(image_dir))
    first_image_file = image_files[0]

    # Construct paths for the image and its corresponding label
    image_path = os.path.join(image_dir, first_image_file)
    label_path = os.path.join(label_dir, os.path.splitext(first_image_file)[0] + '.txt')

    # Load the image using OpenCV
    image = cv2.imread(image_path)
    # Convert the image from BGR (OpenCV default) to RGB (matplotlib default)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    # Read the label file to get bounding box information
    with open(label_path, 'r') as f:
        lines = f.readlines()

    # Plot the bounding boxes on the image
    for line in lines:
        # Parse the label line: class id plus normalized center/size
        class_id, x_center, y_center, width, height = map(float, line.strip().split())
        img_height, img_width, _ = image.shape

        # Convert normalized YOLO coordinates back to pixel units
        x_center *= img_width
        y_center *= img_height
        width *= img_width
        height *= img_height

        # Calculate the top-left and bottom-right corners of the bounding box
        x1 = int(x_center - width / 2)
        y1 = int(y_center - height / 2)
        x2 = int(x_center + width / 2)
        y2 = int(y_center + height / 2)

        # Draw the bounding box on the image as a green rectangle
        cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)

    # Display the image with bounding boxes using matplotlib
    plt.imshow(image)
    plt.axis('off')  # Hide the axes
    plt.show()

if __name__ == "__main__":
    check_yolo_annotations('Dataset/images', 'Dataset/yolo_annotations')
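Note: the checker above only ever inspects the first image in the directory. A quick, optional way to gain more confidence in the conversion is to spot-check a randomly chosen image instead; a minimal sketch of that variant (the helper name and the .png filter are our assumptions, mirroring data_preparation.py):

import os
import random

import cv2
import matplotlib.pyplot as plt

def check_random_annotation(image_dir, label_dir):
    # Pick a random image instead of always the first one
    image_file = random.choice([f for f in os.listdir(image_dir) if f.endswith('.png')])
    image = cv2.cvtColor(cv2.imread(os.path.join(image_dir, image_file)), cv2.COLOR_BGR2RGB)
    img_h, img_w = image.shape[:2]

    label_path = os.path.join(label_dir, os.path.splitext(image_file)[0] + '.txt')
    with open(label_path) as f:
        for line in f:
            _, xc, yc, w, h = map(float, line.split())
            # Denormalize the YOLO coordinates and draw the box
            x1, y1 = int((xc - w / 2) * img_w), int((yc - h / 2) * img_h)
            x2, y2 = int((xc + w / 2) * img_w), int((yc + h / 2) * img_h)
            cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)

    plt.imshow(image)
    plt.axis('off')
    plt.show()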
app.py
ADDED
@@ -0,0 +1,93 @@
import gradio as gr
import cv2
from ultralytics import YOLO
import pytesseract

def load_model(model_path):
    """
    Loads the YOLO model from the specified path.

    Parameters:
        model_path (str): Path to the YOLO model file.

    Returns:
        YOLO: Loaded YOLO model.
    """
    return YOLO(model_path)

def predict_and_plot(image, model):
    """
    Predicts and plots the bounding boxes on the given image using the trained YOLO model.
    Also performs OCR on the detected bounding boxes to extract text.

    Parameters:
        image (numpy.ndarray): Input image.
        model (YOLO): The trained YOLO model.

    Returns:
        numpy.ndarray: Image with bounding boxes drawn.
        str: Detected text from the bounding boxes.
    """
    # Perform prediction on the input image using the model
    results = model.predict(image, device='cpu')

    # Gradio supplies RGB images, so draw on a copy directly; a BGR-to-RGB
    # conversion here would swap the channels incorrectly
    image_rgb = image.copy()

    detected_texts = []

    # Extract the bounding boxes and labels from the results
    for result in results:
        for box in result.boxes:
            # Get the coordinates of the bounding box
            x1, y1, x2, y2 = map(int, box.xyxy[0])
            # Get the confidence score of the prediction
            confidence = box.conf[0]

            # Draw the bounding box on the image
            cv2.rectangle(image_rgb, (x1, y1), (x2, y2), (0, 255, 0), 2)
            # Draw the confidence score near the bounding box
            cv2.putText(image_rgb, f'{confidence*100:.2f}%', (x1, y1 - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 0, 0), 2)

            # Crop the bounding box from the image for OCR
            roi = image[y1:y2, x1:x2]

            # Perform OCR on the cropped image
            text = pytesseract.image_to_string(roi, config='--psm 6')
            detected_texts.append(text.strip())

    detected_text = "\n".join(detected_texts)
    return image_rgb, detected_text

def main(image):
    """
    Main function to handle the prediction and plotting.

    Parameters:
        image (numpy.ndarray): Input image.

    Returns:
        numpy.ndarray: Image with bounding boxes drawn.
        str: Detected text from the bounding boxes.
    """
    # Load the model (ensure 'best.pt' is in the correct path)
    model = load_model('best.pt')
    return predict_and_plot(image, model)

# Create the Gradio interface
iface = gr.Interface(
    fn=main,
    inputs=gr.Image(type="numpy", label="Upload an Image"),
    outputs=[
        gr.Image(type="numpy", label="Output Image with Bounding Boxes"),
        gr.Textbox(label="Detected Text")
    ],
    title="License Plate Detection and OCR",
    description="Upload an image to detect license plates and extract text using YOLO and Tesseract OCR."
)

# Launch the Gradio app
if __name__ == "__main__":
    iface.launch()
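One operational note: pytesseract is only a wrapper that shells out to the Tesseract OCR binary, which must be installed on the host separately from the pip packages. An optional startup check along these lines fails fast with a clear message; the check itself is a sketch, not part of the app:

import shutil

# Optional sanity check: pytesseract needs the Tesseract binary on PATH
if shutil.which("tesseract") is None:
    raise RuntimeError(
        "Tesseract OCR binary not found. Install it with your system "
        "package manager (e.g. the 'tesseract-ocr' package) before launching."
    )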
best.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a4d8b7cf6411a48baad769b7e94c9c70517d06169d016ba7b90224fa9fd92bb0
size 6216793
data_preparation.py
ADDED
@@ -0,0 +1,54 @@
import os
import shutil
from sklearn.model_selection import train_test_split

def prepare_data(dataset_path):
    """
    Prepares the dataset by splitting it into training, validation, and test sets.

    Parameters:
        dataset_path (str): Path to the dataset.
    """
    images_path = os.path.join(dataset_path, 'images')
    yolo_annotations_path = os.path.join(dataset_path, 'yolo_annotations')

    # Paths to the split datasets
    train_images_path = os.path.join(dataset_path, 'YOLO', 'train', 'images')
    val_images_path = os.path.join(dataset_path, 'YOLO', 'val', 'images')
    test_images_path = os.path.join(dataset_path, 'YOLO', 'test', 'images')

    train_annotations_path = os.path.join(dataset_path, 'YOLO', 'train', 'labels')
    val_annotations_path = os.path.join(dataset_path, 'YOLO', 'val', 'labels')
    test_annotations_path = os.path.join(dataset_path, 'YOLO', 'test', 'labels')

    # Create the output directories
    os.makedirs(train_images_path, exist_ok=True)
    os.makedirs(val_images_path, exist_ok=True)
    os.makedirs(test_images_path, exist_ok=True)
    os.makedirs(train_annotations_path, exist_ok=True)
    os.makedirs(val_annotations_path, exist_ok=True)
    os.makedirs(test_annotations_path, exist_ok=True)

    # Get the list of all images and split it in two stages
    all_images = [f for f in os.listdir(images_path) if f.endswith('.png')]
    train_images, val_test_images = train_test_split(all_images, test_size=0.3, random_state=42)
    val_images, test_images = train_test_split(val_test_images, test_size=0.33, random_state=42)

    # Helper to copy images and their annotations
    def copy_files(image_list, dest_image_path, dest_label_path):
        for image in image_list:
            image_path = os.path.join(images_path, image)
            label_path = os.path.join(yolo_annotations_path, image.replace('.png', '.txt'))

            shutil.copy(image_path, dest_image_path)
            shutil.copy(label_path, dest_label_path)

    # Copy files to their respective directories
    copy_files(train_images, train_images_path, train_annotations_path)
    copy_files(val_images, val_images_path, val_annotations_path)
    copy_files(test_images, test_images_path, test_annotations_path)

    print("Dataset split into training, validation, and test sets.")

if __name__ == "__main__":
    prepare_data('Dataset')
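For reference, the two-stage split works out to roughly 70% train, 20% validation, and 10% test: the first split holds out 30% of the images, and the second split sends 33% of that 30% to the test set (0.3 × 0.33 ≈ 0.10) and the remaining 67% to validation (0.3 × 0.67 ≈ 0.20).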
label_format_converter_to_yolo.py
ADDED
@@ -0,0 +1,61 @@
import os
import xml.etree.ElementTree as ET

def convert_to_yolo(annotation_path, yolo_annotation_path):
    """
    Converts PASCAL VOC annotations to YOLO format.

    Parameters:
        annotation_path (str): Path to the PASCAL VOC annotation file.
        yolo_annotation_path (str): Path to save the YOLO format annotation file.
    """
    tree = ET.parse(annotation_path)
    root = tree.getroot()

    # Get the image dimensions
    size = root.find('size')
    width = int(size.find('width').text)
    height = int(size.find('height').text)

    # Write the corresponding YOLO annotation file
    with open(yolo_annotation_path, 'w') as yolo_annotation:
        for obj in root.findall('object'):
            class_id = 0  # All objects are 'license_plate', so class_id is 0
            bndbox = obj.find('bndbox')

            xmin = int(bndbox.find('xmin').text)
            ymin = int(bndbox.find('ymin').text)
            xmax = int(bndbox.find('xmax').text)
            ymax = int(bndbox.find('ymax').text)

            # Convert pixel corners to normalized YOLO center/size format
            x_center = (xmin + xmax) / 2.0 / width
            y_center = (ymin + ymax) / 2.0 / height
            bbox_width = (xmax - xmin) / width
            bbox_height = (ymax - ymin) / height

            yolo_annotation.write(f"{class_id} {x_center} {y_center} {bbox_width} {bbox_height}\n")

def convert_all_annotations(annotations_path, yolo_annotations_path):
    """
    Converts all PASCAL VOC annotations in a directory to YOLO format.

    Parameters:
        annotations_path (str): Directory path to the PASCAL VOC annotations.
        yolo_annotations_path (str): Directory path to save the YOLO format annotations.
    """
    os.makedirs(yolo_annotations_path, exist_ok=True)

    for xml_file in os.listdir(annotations_path):
        if xml_file.endswith('.xml'):
            xml_path = os.path.join(annotations_path, xml_file)
            yolo_annotation_path = os.path.join(yolo_annotations_path, xml_file.replace('.xml', '.txt'))
            convert_to_yolo(xml_path, yolo_annotation_path)

    print("Conversion to YOLO format completed.")

if __name__ == "__main__":
    convert_all_annotations('Dataset', 'Dataset/yolo_annotations')
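As a worked example of the conversion arithmetic (the box and image dimensions are made up for illustration): a VOC box in a 500×400 image converts to one normalized YOLO line as follows.

# Hypothetical VOC box in a 500x400 image, used only to illustrate the math
width, height = 500, 400
xmin, ymin, xmax, ymax = 100, 120, 200, 160

x_center = (xmin + xmax) / 2.0 / width    # 150 / 500 = 0.30
y_center = (ymin + ymax) / 2.0 / height   # 140 / 400 = 0.35
bbox_width = (xmax - xmin) / width        # 100 / 500 = 0.20
bbox_height = (ymax - ymin) / height      # 40 / 400  = 0.10

print(f"0 {x_center} {y_center} {bbox_width} {bbox_height}")  # -> 0 0.3 0.35 0.2 0.1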
predict.py
ADDED
@@ -0,0 +1,52 @@
import cv2
import matplotlib.pyplot as plt
from ultralytics import YOLO
import pytesseract

def predict_and_plot(model, path_test_car):
    """
    Predicts and plots the bounding boxes on the given test image using the trained YOLO model.
    Also performs OCR on the detected bounding boxes to extract text.

    Parameters:
        model (YOLO): The trained YOLO model.
        path_test_car (str): Path to the test image file.
    """
    # Perform prediction on the test image using the model
    # (lowercase 'cpu', consistent with Ultralytics device strings)
    results = model.predict(path_test_car, device='cpu')

    # Load the image using OpenCV
    image = cv2.imread(path_test_car)
    # Convert the image from BGR (OpenCV default) to RGB (matplotlib default)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    # Extract the bounding boxes and labels from the results
    for result in results:
        for box in result.boxes:
            # Get the coordinates of the bounding box
            x1, y1, x2, y2 = map(int, box.xyxy[0])
            # Get the confidence score of the prediction
            confidence = box.conf[0]

            # Draw the bounding box on the image
            cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
            # Draw the confidence score near the bounding box
            cv2.putText(image, f'{confidence*100:.2f}%', (x1, y1 - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 0, 0), 2)

            # Crop the bounding box from the image for OCR
            roi = image[y1:y2, x1:x2]

            # Perform OCR on the cropped image
            text = pytesseract.image_to_string(roi, config='--psm 6')
            print(f"Detected text: {text}")

    # Plot the image with bounding boxes
    plt.imshow(image)
    plt.axis('off')  # Hide the axes
    plt.show()

if __name__ == "__main__":
    model = YOLO('best.pt')
    predict_and_plot(model, "Dataset/images/Cars9.png")
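Tesseract often struggles on small, low-contrast plate crops. One common optional refinement, not part of this repo's pipeline, is to grayscale, upscale, and binarize the ROI before OCR; a minimal sketch, with illustrative defaults:

import cv2
import pytesseract

def ocr_plate(roi_rgb):
    # Grayscale, 2x upscale, and Otsu binarization tend to help Tesseract
    # on small license-plate crops; the factors here are assumptions
    gray = cv2.cvtColor(roi_rgb, cv2.COLOR_RGB2GRAY)
    gray = cv2.resize(gray, None, fx=2, fy=2, interpolation=cv2.INTER_CUBIC)
    _, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # --psm 7 treats the crop as a single line of text, which suits plates
    return pytesseract.image_to_string(binary, config='--psm 7').strip()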
requirements.txt
ADDED
@@ -0,0 +1,5 @@
|
# Note: pytesseract requires the Tesseract OCR engine itself,
# which is a system package installed outside of pip.
opencv-python-headless
pytesseract
ultralytics
gradio
matplotlib
pandas
scikit-learn
train.py
ADDED
@@ -0,0 +1,36 @@
from ultralytics import YOLO

def train_model(model_path, data_path, epochs, batch_size, device, img_size, save_dir):
    """
    Trains the YOLO model on the specified dataset.

    Parameters:
        model_path (str): Path to the YOLO model.
        data_path (str): Path to the dataset configuration file.
        epochs (int): Number of training epochs.
        batch_size (int): Batch size for training.
        device (str): Device to use for training ('cpu' or 'cuda').
        img_size (int): Image size for training.
        save_dir (str): Directory to save the trained model.
    """
    # Use the model_path argument instead of a hard-coded checkpoint
    model = YOLO(model_path)
    model.train(
        data=data_path,
        epochs=epochs,
        batch=batch_size,
        device=device,
        imgsz=img_size,
        cache=True,
        project=save_dir  # 'project' is the Ultralytics argument for the output directory
    )

if __name__ == "__main__":
    train_model(
        model_path='yolov8n.pt',
        data_path='Dataset/datasets.yaml',
        epochs=100,
        batch_size=16,
        device='cuda',
        img_size=320,
        save_dir='Dataset/model'
    )
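After training, the held-out test split from data_preparation.py can be scored with Ultralytics' built-in validator. A minimal sketch, assuming the best checkpoint lands in the default runs/detect layout (adjust the path to your actual run):

from ultralytics import YOLO

# Path assumes the default Ultralytics output layout
model = YOLO('runs/detect/train/weights/best.pt')

# split='test' evaluates on the 'test:' entry of datasets.yaml
metrics = model.val(data='Dataset/datasets.yaml', split='test')
print(metrics.box.map50, metrics.box.map)  # mAP@0.5 and mAP@0.5:0.95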
training_epoch_accuracy_visulizer.py
ADDED
@@ -0,0 +1,32 @@
import pandas as pd
import matplotlib.pyplot as plt

def plot_training_results(csv_path):
    """
    Plots the training accuracy metrics (mAP) over epochs from the training results CSV file.

    Parameters:
        csv_path (str): Path to the training results CSV file.
    """
    # Load the training results from the CSV file
    results = pd.read_csv(csv_path)
    results.columns = results.columns.str.strip()  # Remove any leading/trailing whitespace from column names

    # Extract epochs and mAP metrics
    epochs = results.index + 1  # Epochs are zero-indexed, so add 1
    mAP_0_5 = results['metrics/mAP50(B)']  # Mean Average Precision at IoU=0.5
    mAP_0_5_0_95 = results['metrics/mAP50-95(B)']  # Mean Average Precision at IoU=0.5:0.95

    # Plot the metrics over epochs
    plt.figure(figsize=(10, 5))
    plt.plot(epochs, mAP_0_5, label='mAP@0.5')
    plt.plot(epochs, mAP_0_5_0_95, label='mAP@0.5:0.95')
    plt.xlabel('Epoch')
    plt.ylabel('mAP')  # the plotted metric is mAP, not classification accuracy
    plt.title('mAP Over Epochs')
    plt.legend()
    plt.grid(True)
    plt.show()

if __name__ == "__main__":
    plot_training_results('runs/detect/train/results.csv')
yaml_creator.py
ADDED
@@ -0,0 +1,29 @@
import os  # needed for os.path.join below

def create_yaml_file(dataset_path):
    """
    Creates a dataset configuration YAML file for YOLO training.

    Parameters:
        dataset_path (str): Path to the dataset.
    """
    # Derive the dataset root from dataset_path so the YAML points at the
    # directory data_preparation.py actually writes, rather than a
    # hard-coded absolute '/Dataset/YOLO'
    yolo_root = os.path.abspath(os.path.join(dataset_path, 'YOLO'))
    datasets_yaml = f'''
path: {yolo_root}

train: train/images
val: val/images
test: test/images

# number of classes
nc: 1

# class names
names: ['license_plate']
'''

    # Write the content to the datasets.yaml file
    with open(os.path.join(dataset_path, 'datasets.yaml'), 'w') as file:
        file.write(datasets_yaml)

    print("datasets.yaml file created.")

if __name__ == "__main__":
    create_yaml_file('Dataset')