import importlib 
import torch
import torch.distributed as dist
from .avg_meter import AverageMeter
from collections import defaultdict, OrderedDict
import os
import socket
from mmcv.utils import collect_env as collect_base_env
try:
    from mmcv.utils import get_git_hash
except ImportError:
    from mmengine import get_git_hash
import mmseg
import time
import datetime
import logging


def main_process() -> bool:
    """Return True on the rank-0 process (also when not distributed)."""
    return get_rank() == 0

def get_world_size() -> int:
    if not dist.is_available():
        return 1
    if not dist.is_initialized():
        return 1
    return dist.get_world_size()

def get_rank() -> int:
    if not dist.is_available():
        return 0
    if not dist.is_initialized():
        return 0
    return dist.get_rank()
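
# Usage sketch (illustrative only): these helpers are safe to call whether or
# not torch.distributed is initialized, so they can gate rank-0-only side
# effects such as logging or checkpointing.
#
#     if main_process():
#         print(f'rank {get_rank()} of {get_world_size()} processes')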

def _find_free_port():
    # refer to https://github.com/facebookresearch/detectron2/blob/main/detectron2/engine/launch.py # noqa: E501
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Binding to port 0 will cause the OS to find an available port for us
    sock.bind(('', 0))
    port = sock.getsockname()[1]
    sock.close()
    # NOTE: there is still a chance the port could be taken by other processes.
    return port 

def _is_free_port(port):
    ips = socket.gethostbyname_ex(socket.gethostname())[-1]
    ips.append('localhost')
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return all(s.connect_ex((ip, port)) != 0 for ip in ips)
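
# Usage sketch (illustrative only): prefer a fixed rendezvous port when it is
# free on this host, otherwise let the OS assign one.
#
#     port = 16500 if _is_free_port(16500) else _find_free_port()
#     os.environ.setdefault('MASTER_PORT', str(port))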


def collect_env():
    """Collect the information of the running environments."""
    env_info = collect_base_env()
    env_info['MMSegmentation'] = f'{mmseg.__version__}+{get_git_hash()[:7]}'

    return env_info
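
# Usage sketch (illustrative only): dump the collected environment once from
# the main process, e.g. at the top of a training log.
#
#     if main_process():
#         for name, val in collect_env().items():
#             print(f'{name}: {val}')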

def init_env(launcher, cfg):
    """Initialize the distributed training environment.

    If ``cfg.dist_params.dist_url`` is 'env://', the master port is taken from
    the ``MASTER_PORT`` environment variable. If ``MASTER_PORT`` is unset, the
    SLURM path below chooses one itself (16500 if free, otherwise an
    OS-assigned port); otherwise torch.distributed's default port 29500 applies.
    """
    if launcher == 'slurm':
        _init_dist_slurm(cfg)
    elif launcher == 'ror':
        _init_dist_ror(cfg)
    elif launcher == 'None':
        _init_none_dist(cfg)
    elif launcher == 'pytorch':
        _init_dist_pytorch(cfg)
    else:
        raise RuntimeError(f'launcher {launcher!r} is not supported!')
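
# Usage sketch, assuming a config object (e.g. an mmcv Config) whose
# ``dist_params`` attribute is a mutable namespace, as the helpers below expect:
#
#     init_env('pytorch', cfg)
#     # afterwards cfg.dist_params.dist_url == 'env://' can be passed to
#     # torch.distributed.init_process_group(init_method=cfg.dist_params.dist_url)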

def _init_none_dist(cfg):
    cfg.dist_params.num_gpus_per_node = 1
    cfg.dist_params.world_size = 1
    cfg.dist_params.nnodes = 1
    cfg.dist_params.node_rank = 0
    cfg.dist_params.global_rank = 0
    cfg.dist_params.local_rank = 0
    os.environ["WORLD_SIZE"] = str(1)

def _init_dist_ror(cfg):
    from ac2.ror.comm import get_local_rank, get_world_rank, get_local_size, get_node_rank, get_world_size
    cfg.dist_params.num_gpus_per_node = get_local_size()
    cfg.dist_params.world_size = get_world_size()
    cfg.dist_params.nnodes = (get_world_size()) // (get_local_size())
    cfg.dist_params.node_rank = get_node_rank()
    cfg.dist_params.global_rank = get_world_rank()
    cfg.dist_params.local_rank = get_local_rank()
    os.environ["WORLD_SIZE"] = str(get_world_size())


def _init_dist_pytorch(cfg):
    # Load the environment variables exported by the launcher (e.g. torchrun).
    local_rank = int(os.environ['LOCAL_RANK'])
    world_size = int(os.environ['WORLD_SIZE'])
    global_rank = int(os.environ['RANK'])
    num_gpus = torch.cuda.device_count()

    cfg.dist_params.num_gpus_per_node = num_gpus
    cfg.dist_params.world_size = world_size
    cfg.dist_params.nnodes = world_size // num_gpus
    # The node rank is the global rank divided by the GPUs per node;
    # global_rank % num_gpus would be the local rank instead.
    cfg.dist_params.node_rank = global_rank // num_gpus
    cfg.dist_params.global_rank = global_rank
    cfg.dist_params.local_rank = local_rank

    os.environ['NODE_RANK'] = str(cfg.dist_params.node_rank)
    # Delegate rendezvous to the MASTER_ADDR/MASTER_PORT environment variables.
    cfg.dist_params.dist_url = 'env://'
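
# Note: LOCAL_RANK, RANK and WORLD_SIZE are expected to be exported by the
# launcher. With torchrun that would be, for example (the entry point and its
# flags are placeholders for whatever script calls init_env('pytorch', cfg)):
#
#     torchrun --nnodes=1 --nproc_per_node=8 train.py --launcher pytorch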


def _init_dist_slurm(cfg):
    if 'NNODES' not in os.environ:
        os.environ['NNODES'] = str(cfg.dist_params.nnodes)
    if 'NODE_RANK' not in os.environ:
        os.environ['NODE_RANK'] = str(cfg.dist_params.node_rank)

    num_gpus = torch.cuda.device_count()
    world_size = int(os.environ['NNODES']) * num_gpus
    os.environ['WORLD_SIZE'] = str(world_size)

    # Configure the master port.
    if 'MASTER_PORT' in os.environ:
        master_port = str(os.environ['MASTER_PORT'])  # use MASTER_PORT from the environment
    else:
        # Prefer port 16500 if it is available; otherwise find a free port.
        if _is_free_port(16500):
            master_port = '16500'
        else:
            master_port = str(_find_free_port())
        os.environ['MASTER_PORT'] = master_port

    # Configure the master address.
    if 'MASTER_ADDR' in os.environ:
        master_addr = str(os.environ['MASTER_ADDR'])  # use MASTER_ADDR from the environment
    else:
        master_addr = '127.0.0.1'
        os.environ['MASTER_ADDR'] = master_addr

    # Delegate rendezvous to the environment variables set above.
    cfg.dist_params.dist_url = 'env://'

    cfg.dist_params.num_gpus_per_node = num_gpus
    cfg.dist_params.world_size = world_size
    cfg.dist_params.nnodes = int(os.environ['NNODES'])
    cfg.dist_params.node_rank = int(os.environ['NODE_RANK'])
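
# Note (assumption about the intended setup): this SLURM path expects NNODES
# and NODE_RANK to be exported by the job script (or supplied via
# cfg.dist_params); it only derives WORLD_SIZE and the rendezvous
# address/port from them before init_process_group consumes 'env://'.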

def get_func(func_name):
    """Return a function object by name.

    ``func_name`` must either name a function in this module or be a dotted
    path to a function in an importable module.

    Args:
        func_name (str): the function name or dotted path.
    """
    if func_name == '':
        return None
    try:
        parts = func_name.split('.')
        # A bare name refers to a function in this module.
        if len(parts) == 1:
            return globals()[parts[0]]
        # Otherwise treat it as module.path.function and import the module.
        module_name = '.'.join(parts[:-1])
        module = importlib.import_module(module_name)
        return getattr(module, parts[-1])
    except (ImportError, KeyError, AttributeError) as e:
        raise RuntimeError(f'Failed to find function: {func_name}') from e
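
# Usage sketch (the dotted path is illustrative, not a real module here):
#
#     build = get_func('mono.model.builder.build_model')
#     model = build(cfg)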

class Timer(object):
    """A simple timer."""

    def __init__(self):
        self.reset()

    def tic(self):
        # Use time.time instead of time.clock because time.clock
        # does not normalize for multithreading.
        self.start_time = time.time()

    def toc(self, average=True):
        self.diff = time.time() - self.start_time
        self.total_time += self.diff
        self.calls += 1
        self.average_time = self.total_time / self.calls
        if average:
            return self.average_time
        else:
            return self.diff

    def reset(self):
        self.total_time = 0.
        self.calls = 0
        self.start_time = 0.
        self.diff = 0.
        self.average_time = 0.
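
# Usage sketch (illustrative only; do_work is a placeholder):
#
#     timer = Timer()
#     for _ in range(10):
#         timer.tic()
#         do_work()                        # the timed section
#         last = timer.toc(average=False)  # duration of this iteration
#     mean = timer.average_time            # running mean over all toc() calls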

class TrainingStats(object):
    """Track vital training statistics."""
    def __init__(self, log_period, tensorboard_logger=None):
        self.log_period = log_period
        self.tblogger = tensorboard_logger
        self.tb_ignored_keys = ['iter', 'eta', 'epoch', 'time', 'val_err']
        self.iter_timer = Timer()
        # Window size for smoothing tracked values: each value is tracked by a
        # running-mean AverageMeter that is reset every log period.
        self.filter_size = log_period
        self.smoothed_losses = defaultdict(AverageMeter)


    def IterTic(self):
        self.iter_timer.tic()

    def IterToc(self):
        return self.iter_timer.toc(average=False)

    def reset_iter_time(self):
        self.iter_timer.reset()

    def update_iter_stats(self, losses_dict):
        """Update tracked iteration statistics."""
        for k, v in losses_dict.items():
            self.smoothed_losses[k].update(float(v), 1)

    def log_iter_stats(self, cur_iter, optimizer, max_iters, val_err={}):
        """Log the tracked statistics."""
        if cur_iter % self.log_period == 0:
            stats = self.get_stats(cur_iter, optimizer, max_iters, val_err)
            log_stats(stats)
            if self.tblogger:
                self.tb_log_stats(stats, cur_iter)
            for k, v in self.smoothed_losses.items():
                v.reset()
            self.iter_timer.reset() # reset time counting every log period

    def tb_log_stats(self, stats, cur_iter):
        """Log the tracked statistics to tensorboard"""
        for k in stats:
            # ignore some logs
            if k not in self.tb_ignored_keys:
                v = stats[k]
                if isinstance(v, dict):
                    self.tb_log_stats(v, cur_iter)
                else:
                    self.tblogger.add_scalar(k, v, cur_iter)


    def get_stats(self, cur_iter, optimizer, max_iters, val_err={}):
        eta_seconds = self.iter_timer.average_time * (max_iters - cur_iter)

        eta = str(datetime.timedelta(seconds=int(eta_seconds)))
        stats = OrderedDict(
            iter=cur_iter,  # 1-indexed
            time=self.iter_timer.average_time,
            eta=eta,
        )
        optimizer_state_dict = optimizer.state_dict()
        lr = {}
        for i in range(len(optimizer_state_dict['param_groups'])):
            lr_name = 'group%d_lr' % i
            lr[lr_name] = optimizer_state_dict['param_groups'][i]['lr']

        stats['lr'] = OrderedDict(lr)
        for k, v in self.smoothed_losses.items():
            stats[k] = v.avg

        stats['val_err'] = OrderedDict(val_err)
        stats['max_iters'] = max_iters
        return stats
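
# Usage sketch of the intended logging loop (illustrative only; train_step,
# tb_writer and the loss keys are placeholders):
#
#     stats = TrainingStats(log_period=50, tensorboard_logger=tb_writer)
#     for it in range(1, max_iters + 1):
#         stats.IterTic()
#         losses = train_step()   # e.g. {'total_loss': ..., 'depth_loss': ...}
#         stats.IterToc()
#         stats.update_iter_stats(losses)
#         stats.log_iter_stats(it, optimizer, max_iters)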


def reduce_dict(input_dict, average=True):
    """
    Reduce the values in the dictionary from all processes so that process with rank
    0 has the reduced results.
    Args:
        @input_dict (dict): inputs to be reduced. All the values must be scalar CUDA Tensor.
        @average (bool): whether to do average or sum
    Returns:
        a dict with the same keys as input_dict, after reduction.
    """
    world_size = get_world_size()
    if world_size < 2:
        return input_dict
    with torch.no_grad():
        names = []
        values = []
        # sort the keys so that they are consistent across processes
        for k in sorted(input_dict.keys()):
            names.append(k)
            values.append(input_dict[k])
        values = torch.stack(values, dim=0)
        dist.reduce(values, dst=0)
        if dist.get_rank() == 0 and average:
            # only main process gets accumulated, so only divide by
            # world_size in this case
            values /= world_size
        reduced_dict = {k: v for k, v in zip(names, values)}
    return reduced_dict
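
# Usage sketch (illustrative only): average per-rank scalar losses onto rank 0
# before logging.
#
#     reduced = reduce_dict({'total_loss': loss.detach()})
#     if main_process():
#         print({k: v.item() for k, v in reduced.items()})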


def log_stats(stats):
    """Log training statistics to the terminal."""
    logger = logging.getLogger()
    lines = "[Step %d/%d]\n" % (
            stats['iter'], stats['max_iters'])

    lines += "\t\tloss: %.3f,    time: %.6f,    eta: %s\n" % (
        stats['total_loss'], stats['time'], stats['eta'])

    # log loss
    lines += "\t\t" 
    for k, v in stats.items():
        if 'loss' in k.lower() and 'total_loss' not in k.lower():
            lines += "%s: %.3f" % (k, v)  + ",  "
    lines = lines[:-3]
    lines += '\n'

    # validate criteria
    lines += "\t\tlast val err:" + ",  ".join("%s: %.6f" % (k, v) for k, v in stats['val_err'].items()) + ", "
    lines += '\n'

    # lr in different groups
    lines += "\t\t" +  ",  ".join("%s: %.8f" % (k, v) for k, v in stats['lr'].items())
    lines += '\n'
    logger.info(lines[:-1])  # strip the trailing newline