# Ultralytics YOLO 🚀, AGPL-3.0 license

import subprocess

from ultralytics.cfg import TASK2DATA, TASK2METRIC
from ultralytics.utils import DEFAULT_CFG_DICT, LOGGER, NUM_THREADS


def run_ray_tune(model,
                 space: dict = None,
                 grace_period: int = 10,
                 gpu_per_trial: int = None,
                 max_samples: int = 10,
                 **train_args):
    """
    Runs hyperparameter tuning using Ray Tune.

    Args:
        model (YOLO): Model to run the tuner on.
        space (dict, optional): The hyperparameter search space. Defaults to None.
        grace_period (int, optional): The grace period in epochs of the ASHA scheduler. Defaults to 10.
        gpu_per_trial (int, optional): The number of GPUs to allocate per trial. Defaults to None.
        max_samples (int, optional): The maximum number of trials to run. Defaults to 10.
        train_args (dict, optional): Additional arguments to pass to the `train()` method. Defaults to {}.

    Returns:
        (dict): A dictionary containing the results of the hyperparameter search.

    Example:
        ```python
        from ultralytics import YOLO

        # Load a YOLOv8n model
        model = YOLO('yolov8n.pt')

        # Start tuning hyperparameters for YOLOv8n training on the COCO8 dataset
        result_grid = model.tune(data='coco8.yaml', use_ray=True)
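        # Alternatively, tune over a custom search space and forward extra
        # train() arguments (the values below are illustrative only)
        from ray import tune

        result_grid = model.tune(data='coco8.yaml',
                                 use_ray=True,
                                 space={'lr0': tune.uniform(1e-5, 1e-2)},
                                 epochs=30)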
        ```
    """

    try:
        subprocess.run('pip install ray[tune]'.split(), check=True)  # install Ray Tune on demand

        from ray import tune
        from ray.air import RunConfig
        from ray.air.integrations.wandb import WandbLoggerCallback
        from ray.tune.schedulers import ASHAScheduler
    except (ImportError, subprocess.CalledProcessError) as e:
        raise ModuleNotFoundError(
            'Tuning hyperparameters requires Ray Tune. Install with: pip install "ray[tune]"') from e

    # W&B logging is optional; fall back silently if wandb is missing or shadowed
    try:
        import wandb

        assert hasattr(wandb, '__version__')
    except (ImportError, AssertionError):
        wandb = False

    default_space = {
        # 'optimizer': tune.choice(['SGD', 'Adam', 'AdamW', 'NAdam', 'RAdam', 'RMSProp']),
        'lr0': tune.uniform(1e-5, 1e-1),
        'lrf': tune.uniform(0.01, 1.0),  # final OneCycleLR learning rate (lr0 * lrf)
        'momentum': tune.uniform(0.6, 0.98),  # SGD momentum/Adam beta1
        'weight_decay': tune.uniform(0.0, 0.001),  # optimizer weight decay (default 5e-4)
        'warmup_epochs': tune.uniform(0.0, 5.0),  # warmup epochs (fractions ok)
        'warmup_momentum': tune.uniform(0.0, 0.95),  # warmup initial momentum
        'box': tune.uniform(0.02, 0.2),  # box loss gain
        'cls': tune.uniform(0.2, 4.0),  # cls loss gain (scale with pixels)
        'hsv_h': tune.uniform(0.0, 0.1),  # image HSV-Hue augmentation (fraction)
        'hsv_s': tune.uniform(0.0, 0.9),  # image HSV-Saturation augmentation (fraction)
        'hsv_v': tune.uniform(0.0, 0.9),  # image HSV-Value augmentation (fraction)
        'degrees': tune.uniform(0.0, 45.0),  # image rotation (+/- deg)
        'translate': tune.uniform(0.0, 0.9),  # image translation (+/- fraction)
        'scale': tune.uniform(0.0, 0.9),  # image scale (+/- gain)
        'shear': tune.uniform(0.0, 10.0),  # image shear (+/- deg)
        'perspective': tune.uniform(0.0, 0.001),  # image perspective (+/- fraction), range 0-0.001
        'flipud': tune.uniform(0.0, 1.0),  # image flip up-down (probability)
        'fliplr': tune.uniform(0.0, 1.0),  # image flip left-right (probability)
        'mosaic': tune.uniform(0.0, 1.0),  # image mosaic (probability)
        'mixup': tune.uniform(0.0, 1.0),  # image mixup (probability)
        'copy_paste': tune.uniform(0.0, 1.0)}  # segment copy-paste (probability)
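    # NOTE: a caller-supplied `space` replaces default_space entirely (it is not
    # merged), so a custom space must define every hyperparameter to search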

    def _tune(config):
        """
        Trains the YOLO model with the specified hyperparameters and additional arguments.

        Args:
            config (dict): A dictionary of hyperparameters to use for training.

        Returns:
            None.
        """
        model._reset_callbacks()
        config.update(train_args)
        model.train(**config)

    # Get search space
    if not space:
        space = default_space
        LOGGER.warning('WARNING ⚠️ search space not provided, using default search space.')

    # Get dataset
    data = train_args.get('data', TASK2DATA[model.task])
    space['data'] = data
    if 'data' not in train_args:
        LOGGER.warning(f'WARNING ⚠️ data not provided, using default "data={data}".')

    # Define the trainable function with allocated resources
    trainable_with_resources = tune.with_resources(_tune, {'cpu': NUM_THREADS, 'gpu': gpu_per_trial or 0})

    # Define the ASHA scheduler for hyperparameter search
    asha_scheduler = ASHAScheduler(time_attr='epoch',
                                   metric=TASK2METRIC[model.task],
                                   mode='max',
                                   max_t=train_args.get('epochs') or DEFAULT_CFG_DICT['epochs'] or 100,
                                   grace_period=grace_period,
                                   reduction_factor=3)

    # Define the callbacks for the hyperparameter search
    tuner_callbacks = [WandbLoggerCallback(project='YOLOv8-tune')] if wandb else []

    # Create the Ray Tune hyperparameter search tuner
    tuner = tune.Tuner(trainable_with_resources,
                       param_space=space,
                       tune_config=tune.TuneConfig(scheduler=asha_scheduler, num_samples=max_samples),
                       run_config=RunConfig(callbacks=tuner_callbacks, storage_path='./runs/tune'))

    # Run the hyperparameter search
    tuner.fit()

    # Return the results of the hyperparameter search
    return tuner.get_results()
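

if __name__ == '__main__':
    # Minimal usage sketch, not part of the library API: assumes a local
    # 'yolov8n.pt' checkpoint and network access for the COCO8 dataset;
    # the argument values are illustrative only
    from ultralytics import YOLO

    results = run_ray_tune(YOLO('yolov8n.pt'), data='coco8.yaml', epochs=10, max_samples=5)
    print(results)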