glenn-jocher
committed on
Commit
•
0ad6301
1
Parent(s):
f8e1148
Update script headers (#4163)
Browse files

* Update download script headers
* cleanup
* bug fix attempt
* bug fix attempt2
* bug fix attempt3
* cleanup
- data/scripts/download_weights.sh +7 -2
- data/scripts/get_coco.sh +7 -7
- data/scripts/get_coco128.sh +8 -8
- train.py +3 -2
- utils/autoanchor.py +4 -6
- utils/datasets.py +1 -1
- utils/loggers/wandb/log_dataset.py +2 -2
- utils/loggers/wandb/wandb_utils.py +3 -3
- val.py +1 -1
data/scripts/download_weights.sh
CHANGED
@@ -1,7 +1,12 @@
|
|
1 |
#!/bin/bash
|
|
|
2 |
# Download latest models from https://github.com/ultralytics/yolov5/releases
|
3 |
-
#
|
4 |
-
#
|
|
|
|
|
|
|
|
|
5 |
|
6 |
python - <<EOF
|
7 |
from utils.google_utils import attempt_download
|
|
|
1 |
#!/bin/bash
|
2 |
+
# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0
|
3 |
# Download latest models from https://github.com/ultralytics/yolov5/releases
|
4 |
+
# YOLOv5 🚀 example usage: bash path/to/download_weights.sh
|
5 |
+
# parent
|
6 |
+
# └── yolov5
|
7 |
+
#     ├── yolov5s.pt  ← downloads here
|
8 |
+
#     ├── yolov5m.pt
|
9 |
+
#     └── ...
|
10 |
|
11 |
python - <<EOF
|
12 |
from utils.google_utils import attempt_download
|
data/scripts/get_coco.sh
CHANGED
@@ -1,11 +1,11 @@
|
|
1 |
#!/bin/bash
|
2 |
-
#
|
3 |
-
# Download
|
4 |
-
#
|
5 |
-
#
|
6 |
-
#
|
7 |
-
#
|
8 |
-
#
|
9 |
|
10 |
# Download/unzip labels
|
11 |
d='../datasets' # unzip directory
|
|
|
1 |
#!/bin/bash
|
2 |
+
# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0
|
3 |
+
# Download COCO 2017 dataset http://cocodataset.org
|
4 |
+
# YOLOv5 🚀 example usage: bash data/scripts/get_coco.sh
|
5 |
+
# parent
|
6 |
+
# ├── yolov5
|
7 |
+
# └── datasets
|
8 |
+
#     └── coco  ← downloads here
|
9 |
|
10 |
# Download/unzip labels
|
11 |
d='../datasets' # unzip directory
|
data/scripts/get_coco128.sh
CHANGED
@@ -1,14 +1,14 @@
|
|
1 |
#!/bin/bash
|
2 |
-
#
|
3 |
-
# Download
|
4 |
-
#
|
5 |
-
#
|
6 |
-
#
|
7 |
-
#
|
8 |
-
#
|
9 |
|
10 |
# Download/unzip images and labels
|
11 |
-
d='../' # unzip directory
|
12 |
url=https://github.com/ultralytics/yolov5/releases/download/v1.0/
|
13 |
f='coco128.zip' # or 'coco2017labels-segments.zip', 68 MB
|
14 |
echo 'Downloading' $url$f ' ...'
|
|
|
1 |
#!/bin/bash
|
2 |
+
# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0
|
3 |
+
# Download COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017)
|
4 |
+
# YOLOv5 🚀 example usage: bash data/scripts/get_coco128.sh
|
5 |
+
# parent
|
6 |
+
# ├── yolov5
|
7 |
+
# └── datasets
|
8 |
+
#     └── coco128  ← downloads here
|
9 |
|
10 |
# Download/unzip images and labels
|
11 |
+
d='../datasets' # unzip directory
|
12 |
url=https://github.com/ultralytics/yolov5/releases/download/v1.0/
|
13 |
f='coco128.zip' # or 'coco2017labels-segments.zip', 68 MB
|
14 |
echo 'Downloading' $url$f ' ...'
|
train.py
CHANGED
@@ -78,8 +78,9 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary
|
|
78 |
plots = not evolve # create plots
|
79 |
cuda = device.type != 'cpu'
|
80 |
init_seeds(1 + RANK)
|
81 |
-
with open(data) as f:
|
82 |
-
data_dict = yaml.safe_load(f)
|
|
|
83 |
nc = 1 if single_cls else int(data_dict['nc']) # number of classes
|
84 |
names = ['item'] if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names
|
85 |
assert len(names) == nc, f'{len(names)} names found for nc={nc} dataset in {data}' # check
|
|
|
78 |
plots = not evolve # create plots
|
79 |
cuda = device.type != 'cpu'
|
80 |
init_seeds(1 + RANK)
|
81 |
+
with open(data, encoding='ascii', errors='ignore') as f:
|
82 |
+
data_dict = yaml.safe_load(f)
|
83 |
+
|
84 |
nc = 1 if single_cls else int(data_dict['nc']) # number of classes
|
85 |
names = ['item'] if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names
|
86 |
assert len(names) == nc, f'{len(names)} names found for nc={nc} dataset in {data}' # check
|
utils/autoanchor.py
CHANGED
@@ -60,11 +60,11 @@ def check_anchors(dataset, model, thr=4.0, imgsz=640):
|
|
60 |
print('') # newline
|
61 |
|
62 |
|
63 |
-
def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True):
|
64 |
""" Creates kmeans-evolved anchors from training dataset
|
65 |
|
66 |
Arguments:
|
67 |
-
|
68 |
n: number of anchors
|
69 |
img_size: image size used for training
|
70 |
thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0
|
@@ -103,13 +103,11 @@ def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=10
|
|
103 |
print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n') # use in *.cfg
|
104 |
return k
|
105 |
|
106 |
-
if isinstance(path, str):  # *.yaml file
|
107 |
-
with open(path) as f:
|
108 |
data_dict = yaml.safe_load(f) # model dict
|
109 |
from utils.datasets import LoadImagesAndLabels
|
110 |
dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True)
|
111 |
-
else:
|
112 |
-
dataset = path # dataset
|
113 |
|
114 |
# Get label wh
|
115 |
shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True)
|
|
|
60 |
print('') # newline
|
61 |
|
62 |
|
63 |
+
def kmean_anchors(dataset='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True):
|
64 |
""" Creates kmeans-evolved anchors from training dataset
|
65 |
|
66 |
Arguments:
|
67 |
+
dataset: path to data.yaml, or a loaded dataset
|
68 |
n: number of anchors
|
69 |
img_size: image size used for training
|
70 |
thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0
|
|
|
103 |
print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n') # use in *.cfg
|
104 |
return k
|
105 |
|
106 |
+
if isinstance(dataset, str): # *.yaml file
|
107 |
+
with open(dataset, encoding='ascii', errors='ignore') as f:
|
108 |
data_dict = yaml.safe_load(f) # model dict
|
109 |
from utils.datasets import LoadImagesAndLabels
|
110 |
dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True)
|
|
|
|
|
111 |
|
112 |
# Get label wh
|
113 |
shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True)
|
utils/datasets.py
CHANGED
@@ -909,7 +909,7 @@ def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False):
|
|
909 |
return False, None, path
|
910 |
|
911 |
zipped, data_dir, yaml_path = unzip(Path(path))
|
912 |
-
with open(check_file(yaml_path)) as f:
|
913 |
data = yaml.safe_load(f) # data dict
|
914 |
if zipped:
|
915 |
data['path'] = data_dir # TODO: should this be dir.resolve()?
|
|
|
909 |
return False, None, path
|
910 |
|
911 |
zipped, data_dir, yaml_path = unzip(Path(path))
|
912 |
+
with open(check_file(yaml_path), encoding='ascii', errors='ignore') as f:
|
913 |
data = yaml.safe_load(f) # data dict
|
914 |
if zipped:
|
915 |
data['path'] = data_dir # TODO: should this be dir.resolve()?
|
utils/loggers/wandb/log_dataset.py
CHANGED
@@ -8,9 +8,9 @@ WANDB_ARTIFACT_PREFIX = 'wandb-artifact://'
|
|
8 |
|
9 |
|
10 |
def create_dataset_artifact(opt):
|
11 |
-
with open(opt.data) as f:
|
12 |
data = yaml.safe_load(f) # data dict
|
13 |
-
logger = WandbLogger(opt, '', None, data, job_type='Dataset Creation')
|
14 |
|
15 |
|
16 |
if __name__ == '__main__':
|
|
|
8 |
|
9 |
|
10 |
def create_dataset_artifact(opt):
|
11 |
+
with open(opt.data, encoding='ascii', errors='ignore') as f:
|
12 |
data = yaml.safe_load(f) # data dict
|
13 |
+
logger = WandbLogger(opt, '', None, data, job_type='Dataset Creation') # TODO: return value unused
|
14 |
|
15 |
|
16 |
if __name__ == '__main__':
|
utils/loggers/wandb/wandb_utils.py
CHANGED
@@ -62,7 +62,7 @@ def check_wandb_resume(opt):
|
|
62 |
|
63 |
|
64 |
def process_wandb_config_ddp_mode(opt):
|
65 |
-
with open(check_file(opt.data)) as f:
|
66 |
data_dict = yaml.safe_load(f) # data dict
|
67 |
train_dir, val_dir = None, None
|
68 |
if isinstance(data_dict['train'], str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX):
|
@@ -150,7 +150,7 @@ class WandbLogger():
|
|
150 |
opt.single_cls,
|
151 |
'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem)
|
152 |
print("Created dataset config file ", config_path)
|
153 |
-
with open(config_path) as f:
|
154 |
wandb_data_dict = yaml.safe_load(f)
|
155 |
return wandb_data_dict
|
156 |
|
@@ -226,7 +226,7 @@ class WandbLogger():
|
|
226 |
print("Saving model artifact on epoch ", epoch + 1)
|
227 |
|
228 |
def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False):
|
229 |
-
with open(data_file) as f:
|
230 |
data = yaml.safe_load(f) # data dict
|
231 |
check_dataset(data)
|
232 |
nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names'])
|
|
|
62 |
|
63 |
|
64 |
def process_wandb_config_ddp_mode(opt):
|
65 |
+
with open(check_file(opt.data), encoding='ascii', errors='ignore') as f:
|
66 |
data_dict = yaml.safe_load(f) # data dict
|
67 |
train_dir, val_dir = None, None
|
68 |
if isinstance(data_dict['train'], str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX):
|
|
|
150 |
opt.single_cls,
|
151 |
'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem)
|
152 |
print("Created dataset config file ", config_path)
|
153 |
+
with open(config_path, encoding='ascii', errors='ignore') as f:
|
154 |
wandb_data_dict = yaml.safe_load(f)
|
155 |
return wandb_data_dict
|
156 |
|
|
|
226 |
print("Saving model artifact on epoch ", epoch + 1)
|
227 |
|
228 |
def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False):
|
229 |
+
with open(data_file, encoding='ascii', errors='ignore') as f:
|
230 |
data = yaml.safe_load(f) # data dict
|
231 |
check_dataset(data)
|
232 |
nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names'])
|
val.py
CHANGED
@@ -123,7 +123,7 @@ def run(data,
|
|
123 |
# model = nn.DataParallel(model)
|
124 |
|
125 |
# Data
|
126 |
-
with open(data) as f:
|
127 |
data = yaml.safe_load(f)
|
128 |
check_dataset(data) # check
|
129 |
|
|
|
123 |
# model = nn.DataParallel(model)
|
124 |
|
125 |
# Data
|
126 |
+
with open(data, encoding='ascii', errors='ignore') as f:
|
127 |
data = yaml.safe_load(f)
|
128 |
check_dataset(data) # check
|
129 |
|