glenn-jocher committed e6e36aa
Parent(s): acc58c1

Update bytes to GB with bitshift (#6886)

Files changed:
- utils/__init__.py +3 -4
- utils/autobatch.py +4 -3
- utils/general.py +3 -2
- utils/torch_utils.py +1 -1
utils/__init__.py
CHANGED
@@ -21,14 +21,13 @@ def notebook_init(verbose=True):
     if is_colab():
         shutil.rmtree('/content/sample_data', ignore_errors=True)  # remove colab /sample_data directory

+    # System info
     if verbose:
-        # System info
-        # gb = 1 / 1000 ** 3  # bytes to GB
-        gib = 1 / 1024 ** 3  # bytes to GiB
+        gb = 1 << 30  # bytes to GiB (1024 ** 3)
         ram = psutil.virtual_memory().total
         total, used, free = shutil.disk_usage("/")
         display.clear_output()
-        s = f'({os.cpu_count()} CPUs, {ram * gib:.1f} GB RAM, {(total - free) * gib:.1f}/{total * gib:.1f} GB disk)'
+        s = f'({os.cpu_count()} CPUs, {ram / gb:.1f} GB RAM, {(total - free) / gb:.1f}/{total / gb:.1f} GB disk)'
     else:
         s = ''

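The whole change hinges on one identity: 1 << 30 is the integer 2 ** 30 = 1024 ** 3 = 1073741824, so dividing a byte count by it yields GiB, and 1 << 20 likewise yields MiB. The old code multiplied by a float reciprocal (gib = 1 / 1024 ** 3); the new code divides by an exact integer constant. A minimal standalone sketch, not part of the commit (the RAM value is hypothetical):

# bitshift constants are exact integer powers of two
gb = 1 << 30  # 1073741824 == 1024 ** 3, bytes per GiB
mb = 1 << 20  # 1048576 == 1024 ** 2, bytes per MiB
assert gb == 1024 ** 3 and mb == 1024 ** 2

ram = 16_106_127_360  # hypothetical byte count: exactly 15 GiB
print(f'{ram / gb:.1f} GB RAM')  # -> 15.0 GB RAM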
utils/autobatch.py
CHANGED
@@ -34,11 +34,12 @@ def autobatch(model, imgsz=640, fraction=0.9, batch_size=16):
         LOGGER.info(f'{prefix}CUDA not detected, using default CPU batch-size {batch_size}')
         return batch_size

+    gb = 1 << 30  # bytes to GiB (1024 ** 3)
     d = str(device).upper()  # 'CUDA:0'
     properties = torch.cuda.get_device_properties(device)  # device properties
-    t = properties.total_memory / 1024 ** 3  # (GiB)
-    r = torch.cuda.memory_reserved(device) / 1024 ** 3  # (GiB)
-    a = torch.cuda.memory_allocated(device) / 1024 ** 3  # (GiB)
+    t = properties.total_memory / gb  # (GiB)
+    r = torch.cuda.memory_reserved(device) / gb  # (GiB)
+    a = torch.cuda.memory_allocated(device) / gb  # (GiB)
     f = t - (r + a)  # free inside reserved
     LOGGER.info(f'{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free')

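For reference, the memory accounting here reads total GPU memory, PyTorch's reserved pool, and live allocations, then treats the remainder as usable headroom. A CPU-only sketch with hypothetical numbers (the real values come from the torch.cuda calls shown in the diff):

gb = 1 << 30            # bytes to GiB
total_memory = 16 * gb  # hypothetical 16 GiB card (properties.total_memory)
reserved = 2 * gb       # stand-in for torch.cuda.memory_reserved(device)
allocated = 1 * gb      # stand-in for torch.cuda.memory_allocated(device)

t, r, a = total_memory / gb, reserved / gb, allocated / gb
f = t - (r + a)  # free inside reserved
print(f'{t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free')
# -> 16.00G total, 2.00G reserved, 1.00G allocated, 13.00G free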
utils/general.py
CHANGED
@@ -223,11 +223,12 @@ def emojis(str=''):

 def file_size(path):
     # Return file/dir size (MB)
+    mb = 1 << 20  # bytes to MiB (1024 ** 2)
     path = Path(path)
     if path.is_file():
-        return path.stat().st_size / 1E6
+        return path.stat().st_size / mb
     elif path.is_dir():
-        return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / 1E6
+        return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / mb
     else:
         return 0.0

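Note this hunk also shifts the unit slightly: dividing by 1E6 reports decimal megabytes, while 1 << 20 reports MiB (about 4.9% smaller), even though the comment still says MB. A self-contained check of the updated helper using a temporary file of known size:

import os
import tempfile
from pathlib import Path

def file_size(path):
    # mirrors the committed helper: size in MiB (repo comment says MB)
    mb = 1 << 20  # bytes to MiB (1024 ** 2)
    path = Path(path)
    if path.is_file():
        return path.stat().st_size / mb
    elif path.is_dir():
        return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / mb
    return 0.0

with tempfile.NamedTemporaryFile(delete=False) as f:
    f.write(b'\0' * (3 * (1 << 20)))  # write exactly 3 MiB
print(f'{file_size(f.name):.1f} MB')  # -> 3.0 MB
os.remove(f.name)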
utils/torch_utils.py
CHANGED
@@ -86,7 +86,7 @@ def select_device(device='', batch_size=0, newline=True):
         space = ' ' * (len(s) + 1)
         for i, d in enumerate(devices):
             p = torch.cuda.get_device_properties(i)
-            s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2:.0f}MiB)\n"  # bytes to MB
+            s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / (1 << 20):.0f}MiB)\n"  # bytes to MB
     else:
         s += 'CPU\n'

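Here the bitshift is used inline rather than bound to a name, since the f-string is its only consumer. A quick sketch of the formatting with hypothetical device values (the real ones come from torch.cuda.get_device_properties):

name = 'Tesla T4'                 # hypothetical p.name
total_memory = 15109 * (1 << 20)  # hypothetical p.total_memory, exactly 15109 MiB
print(f"CUDA:0 ({name}, {total_memory / (1 << 20):.0f}MiB)")
# -> CUDA:0 (Tesla T4, 15109MiB)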