File size: 5,377 Bytes
c3262a7 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 |
import argparse
import json
import os
import random
import shutil
import subprocess
import sys

from tqdm import tqdm
import yaml

import utils
from safe_executor import SafeExecutor
# Maps raw lane-marking labels (from lane_markings.json) to YOLO class ids.
# Ids 0/1 must stay in sync with the 'names' mapping written by create_yaml_file.
class_mapping = {
    "lm_dashed": 1,
    "lm_solid": 0,
    "lm_botts_dot": 0, # Treating as lm_solid
    "lm_shaded": 0 # Treating as lm_solid
}
def extract_base_dataset(from_res):
    """Run the base-dataset extraction script for the given resolution.

    Invokes extract_base_dataset.py in a child process using an argument
    list (shell=False), so `from_res` is never interpreted by a shell —
    the original os.system call built a shell command by string
    interpolation and also silently ignored a non-zero exit status.

    Raises:
        subprocess.CalledProcessError: if the extraction script fails.
    """
    subprocess.run(
        [sys.executable, "extract_base_dataset.py", "--from_res", from_res],
        check=True,
    )
def remove_cache_dir(cache_dir):
    """Delete the cache directory tree if present; a missing dir is a no-op."""
    if not os.path.exists(cache_dir):
        return
    shutil.rmtree(cache_dir)
def create_cache_dir(cache_dir):
    """Ensure the cache directory exists (delegates to the project helper)."""
    utils.check_and_create_dir(cache_dir)
def load_annotations(file):
    """Parse *file* as JSON and return the decoded object.

    Propagates FileNotFoundError to the caller, which uses it to detect
    frames without annotations.
    """
    with open(file) as handle:
        return json.load(handle)
def convert_and_save_annotations(annotated_files, cache_dir, from_res):
    """Convert per-frame lane annotations to YOLO-seg label files in cache_dir.

    For each annotated frame directory, reads annotations/lane_markings.json,
    converts it via the project's class_mapping at the given resolution
    ("WxH" string), and writes one <basename>.txt label file. Frames with a
    missing annotation file, or with no convertible annotations, get an
    empty label file instead of being skipped.

    Args:
        annotated_files: paths to annotated frame directories.
        cache_dir: destination directory for the .txt label files.
        from_res: resolution string such as "1920x1080".
    """
    width, height = map(int, from_res.split('x'))
    for file in tqdm(annotated_files, desc="Converting and saving annotations"):
        base_name = os.path.basename(file)
        output_file_path = os.path.join(cache_dir, f'{base_name}.txt')
        lane_annotations_path = os.path.join(file, "annotations", "lane_markings.json")
        try:
            lane_annotations = load_annotations(lane_annotations_path)
        except FileNotFoundError:
            # No annotation file for this frame: still emit an empty label
            # file so the frame remains part of the dataset.
            open(output_file_path, 'w').close()
            continue
        yolo_annotations = utils.convert_lane_annotations_to_yolo_seg_format(
            lane_annotations, class_mapping, width, height)
        # Mode 'w' already creates/truncates the file, so an empty result
        # naturally yields an empty label file — the explicit f.write("")
        # in the original was a no-op.
        with open(output_file_path, 'w') as f:
            f.writelines(f"{line}\n" for line in yolo_annotations)
def split_files(list_of_files, train_split=0.8):
    """Randomly partition *list_of_files* into (train, val) lists.

    Args:
        list_of_files: files to split. Not mutated — the original version
            called random.shuffle on it, reordering the caller's list in
            place as a side effect.
        train_split: fraction of files assigned to the train split.

    Returns:
        (train_files, val_files) — together they cover every input exactly
        once; train gets int(len * train_split) elements.
    """
    # random.sample over the full length is a shuffled copy.
    shuffled = random.sample(list_of_files, len(list_of_files))
    split_index = int(len(shuffled) * train_split)
    return shuffled[:split_index], shuffled[split_index:]
def _copy_split(files, from_res, dest_dir, desc):
    """Copy each cached label file plus its matching .jpg into dest_dir.

    Label files whose source image is missing are skipped entirely, so
    images and labels in the output stay in sync.
    """
    images_dir = os.path.join(utils.ROOT_DIR, "dataset", f'{from_res}_images')
    labels_dir = os.path.join(utils.ROOT_DIR, '.cache', f'{from_res}_annotations')
    for file in tqdm(files, desc=desc):
        base_name = os.path.splitext(os.path.basename(file))[0]
        image_file = os.path.join(images_dir, f'{base_name}.jpg')
        if os.path.exists(image_file):
            shutil.copy(os.path.join(labels_dir, file), dest_dir)
            shutil.copy(image_file, dest_dir)


def prepare_yolo_dataset(train_files, val_files, from_res):
    """Build the YOLO dataset directory (train/, val/, dataset.yaml).

    Prompts before removing an existing dataset directory; any answer other
    than 'y' aborts without touching the filesystem. The train and val copy
    loops were duplicated verbatim in the original; both now go through
    _copy_split.
    """
    dataset_dir = os.path.join(utils.ROOT_DIR, "dataset", f"yolo_seg_lane_{from_res}")
    train_dir = os.path.join(dataset_dir, "train")
    val_dir = os.path.join(dataset_dir, "val")
    if os.path.exists(dataset_dir):
        user_input = input(f"The dataset directory {dataset_dir} already exists. Do you want to remove it? (y/n): ")
        if user_input.lower() == 'y':
            shutil.rmtree(dataset_dir)
        else:
            print("Exiting without making changes.")
            return
    utils.check_and_create_dir(train_dir)
    utils.check_and_create_dir(val_dir)
    _copy_split(train_files, from_res, train_dir, "Preparing YOLO train dataset")
    _copy_split(val_files, from_res, val_dir, "Preparing YOLO val dataset")
    create_yaml_file(dataset_dir, train_dir, val_dir)
def create_yaml_file(dataset_dir, train_dir, val_dir):
    """Write dataset.yaml describing the YOLO dataset layout and class names.

    train_dir/val_dir are accepted for interface compatibility; the YAML
    references the splits by their names relative to 'path'.
    """
    class_names = {
        0: 'lm_solid',
        1: 'lm_dashed',
    }
    yaml_content = {
        'path': dataset_dir,
        # 'train' and 'val' are resolved relative to 'path'
        'train': 'train',
        'val': 'val',
        'names': class_names,
    }
    destination = os.path.join(dataset_dir, 'dataset.yaml')
    with open(destination, 'w') as yaml_file:
        yaml.dump(yaml_content, yaml_file, default_flow_style=False)
def main():
parser = argparse.ArgumentParser()
supported_resolutions = utils.get_supported_resolutions()
str_supported_resolutions = ', '.join(supported_resolutions)
parser.add_argument('--from_res', type=str, help=f'Choose available dataset: {str_supported_resolutions}', required=True)
parser.add_argument('--cache_enabled', type=bool, help='Enable caching', default=False)
args = parser.parse_args()
if args.from_res not in supported_resolutions:
print(f"Unsupported resolution. Supported resolutions are: {str_supported_resolutions}")
exit(1)
extract_base_dataset(args.from_res)
annotated_files = utils.get_annotated_files_list()
cache_dir = os.path.join(utils.ROOT_DIR, ".cache", f"{args.from_res}_annotations")
if not args.cache_enabled:
remove_cache_dir(cache_dir)
create_cache_dir(cache_dir)
paths_to_cleanup = [cache_dir, os.path.join(utils.ROOT_DIR, "dataset", f"yolo_seg_lane_{args.from_res}")]
with SafeExecutor(paths_to_cleanup):
convert_and_save_annotations(annotated_files, cache_dir, args.from_res)
list_of_files = os.listdir(cache_dir)
train_files, val_files = split_files(list_of_files)
prepare_yolo_dataset(train_files, val_files, args.from_res)
print("Annotations extracted and YOLO dataset prepared successfully")
if __name__ == "__main__":
main()
|