Commit · 8a3cc11
1 Parent(s): c0e7ea8
Upload hubconf.py
hubconf.py  +145 -0
hubconf.py  ADDED
@@ -0,0 +1,145 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5/

Usage:
    import torch
    model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
    model = torch.hub.load('ultralytics/yolov5:master', 'custom', 'path/to/yolov5s.onnx')  # file from branch
"""

import torch

def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
    """Creates or loads a YOLOv5 model

    Arguments:
        name (str): model name 'yolov5s' or path 'path/to/best.pt'
        pretrained (bool): load pretrained weights into the model
        channels (int): number of input channels
        classes (int): number of model classes
        autoshape (bool): apply YOLOv5 .autoshape() wrapper to model
        verbose (bool): print all information to screen
        device (str, torch.device, None): device to use for model parameters

    Returns:
        YOLOv5 model
    """
    from pathlib import Path

    from models.common import AutoShape, DetectMultiBackend
    from models.yolo import Model
    from utils.downloads import attempt_download
    from utils.general import LOGGER, check_requirements, intersect_dicts, logging
    from utils.torch_utils import select_device

    if not verbose:
        LOGGER.setLevel(logging.WARNING)
    check_requirements(exclude=('tensorboard', 'thop', 'opencv-python'))
    name = Path(name)
    path = name.with_suffix('.pt') if name.suffix == '' else name  # checkpoint path
    try:
        device = select_device(('0' if torch.cuda.is_available() else 'cpu') if device is None else device)

        if pretrained and channels == 3 and classes == 80:
            model = DetectMultiBackend(path, device=device)  # download/load FP32 model
            # model = models.experimental.attempt_load(path, map_location=device)  # download/load FP32 model
        else:
            cfg = list((Path(__file__).parent / 'models').rglob(f'{path.stem}.yaml'))[0]  # model.yaml path
            model = Model(cfg, channels, classes)  # create model
            if pretrained:
                ckpt = torch.load(attempt_download(path), map_location=device)  # load
                csd = ckpt['model'].float().state_dict()  # checkpoint state_dict as FP32
                csd = intersect_dicts(csd, model.state_dict(), exclude=['anchors'])  # intersect
                model.load_state_dict(csd, strict=False)  # load
                if len(ckpt['model'].names) == classes:
                    model.names = ckpt['model'].names  # set class names attribute
        if autoshape:
            model = AutoShape(model)  # for file/URI/PIL/cv2/np inputs and NMS
        return model.to(device)

    except Exception as e:
        help_url = 'https://github.com/ultralytics/yolov5/issues/36'
        s = f'{e}. Cache may be out of date, try `force_reload=True` or see {help_url} for help.'
        raise Exception(s) from e

def custom(path='path/to/model.pt', autoshape=True, verbose=True, device=None):
    # YOLOv5 custom or local model
    return _create(path, autoshape=autoshape, verbose=verbose, device=device)

def yolov5n(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
    # YOLOv5-nano model https://github.com/ultralytics/yolov5
    return _create('yolov5n', pretrained, channels, classes, autoshape, verbose, device)


def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
    # YOLOv5-small model https://github.com/ultralytics/yolov5
    return _create('yolov5s', pretrained, channels, classes, autoshape, verbose, device)


def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
    # YOLOv5-medium model https://github.com/ultralytics/yolov5
    return _create('yolov5m', pretrained, channels, classes, autoshape, verbose, device)


def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
    # YOLOv5-large model https://github.com/ultralytics/yolov5
    return _create('yolov5l', pretrained, channels, classes, autoshape, verbose, device)


def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
    # YOLOv5-xlarge model https://github.com/ultralytics/yolov5
    return _create('yolov5x', pretrained, channels, classes, autoshape, verbose, device)


def yolov5n6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
    # YOLOv5-nano-P6 model https://github.com/ultralytics/yolov5
    return _create('yolov5n6', pretrained, channels, classes, autoshape, verbose, device)


def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
    # YOLOv5-small-P6 model https://github.com/ultralytics/yolov5
    return _create('yolov5s6', pretrained, channels, classes, autoshape, verbose, device)


def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
    # YOLOv5-medium-P6 model https://github.com/ultralytics/yolov5
    return _create('yolov5m6', pretrained, channels, classes, autoshape, verbose, device)


def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
    # YOLOv5-large-P6 model https://github.com/ultralytics/yolov5
    return _create('yolov5l6', pretrained, channels, classes, autoshape, verbose, device)


def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
    # YOLOv5-xlarge-P6 model https://github.com/ultralytics/yolov5
    return _create('yolov5x6', pretrained, channels, classes, autoshape, verbose, device)

if __name__ == '__main__':
    model = _create(name='yolov5s', pretrained=True, channels=3, classes=80, autoshape=True, verbose=True)  # pretrained
    # model = custom(path='path/to/model.pt')  # custom

    # Verify inference
    from pathlib import Path

    import numpy as np
    from PIL import Image

    from utils.general import cv2

    imgs = [
        'data/images/zidane.jpg',  # filename
        Path('data/images/zidane.jpg'),  # Path
        'https://ultralytics.com/images/zidane.jpg',  # URI
        cv2.imread('data/images/bus.jpg')[:, :, ::-1],  # OpenCV
        Image.open('data/images/bus.jpg'),  # PIL
        np.zeros((320, 640, 3))]  # numpy

    results = model(imgs, size=320)  # batched inference
    results.print()
    results.save()
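
With this hubconf.py in place, the entry points defined above ('yolov5s', 'custom', etc.) become loadable through the standard torch.hub API instead of importing the repository directly. The snippet below is a minimal usage sketch assembled from the module docstring and the __main__ block above; the repository name, image URL, and the commented-out checkpoint path are taken from this file, while the inference size and the force_reload flag (suggested in _create's error message for stale caches) are illustrative choices, not part of the commit.

    import torch

    # Pull a pretrained YOLOv5s through PyTorch Hub; force_reload=True refreshes a stale cached copy of the repo.
    model = torch.hub.load('ultralytics/yolov5', 'yolov5s', force_reload=True)

    # A custom checkpoint goes through the 'custom' entry point defined above.
    # model = torch.hub.load('ultralytics/yolov5', 'custom', 'path/to/model.pt')

    # AutoShape accepts filenames, Paths, URLs, PIL images, OpenCV/numpy arrays (see the __main__ block).
    results = model(['https://ultralytics.com/images/zidane.jpg'], size=640)  # batched inference
    results.print()  # print a detection summary
    results.save()   # save annotated images (under runs/detect/ by default)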