# microScan/utils/logging.py
import logging
import os
import pandas as pd
# Module-level logger. Its level is DEBUG so that DEBUG records are created
# and propagated to the root logger's handlers (file handler installed by
# set_log accepts them; the console handler filters at INFO).
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def set_log(log_dir):
    """Configure root logging for a training run.

    Installs a file handler writing ``<log_dir>/train.log`` (truncated on
    each run via ``filemode='w'``) with a bare ``%(message)s`` format, plus
    a console handler that only emits INFO and above.

    :param log_dir: Directory in which ``train.log`` is created. Must exist.
    """
    logging.basicConfig(
        format='%(message)s',
        # os.path.join for consistency with the CSV helpers below.
        filename=os.path.join(log_dir, "train.log"),
        filemode='w'
    )
    # Console mirror of the root logger, filtered at INFO so DEBUG-level
    # records (e.g. from coco_log) go only to the file.
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    # Add the handler to the root logger.
    logging.getLogger().addHandler(console)
def log(content, *args):
    """Log ``content`` at INFO level, with the string form of any extra
    positional arguments concatenated onto it.

    :param content: Leading message string.
    :param args: Extra values, appended via ``str()`` with no separator.
    """
    # str.join instead of += in a loop (avoids quadratic concatenation).
    message = content + ''.join(str(arg) for arg in args)
    logger.info(message)
def coco_log(log_dir, stats):
    """Append the 12 standard COCO evaluation metrics to ``train.log``.

    :param log_dir: Directory containing ``train.log`` (as set up by
        ``set_log``).
    :param stats: Sequence of 12 floats in pycocotools ``COCOeval.stats``
        order (6 AP entries followed by 6 AR entries) — assumed, TODO
        confirm against the caller.
    """
    log_dict_keys = [
        'Average Precision  (AP) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ]',
        'Average Precision  (AP) @[ IoU=0.50      | area=   all | maxDets=100 ]',
        'Average Precision  (AP) @[ IoU=0.75      | area=   all | maxDets=100 ]',
        'Average Precision  (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ]',
        'Average Precision  (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ]',
        'Average Precision  (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ]',
        'Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=  1 ]',
        'Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets= 10 ]',
        'Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ]',
        'Average Recall     (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ]',
        'Average Recall     (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ]',
        'Average Recall     (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ]',
    ]
    with open(f"{log_dir}/train.log", 'a+') as f:
        # Blank separator line written directly; the metric lines below go
        # through the logger's file handler, which targets the same file.
        f.write('\n')
        for i, key in enumerate(log_dict_keys):
            out_str = f"{key} = {stats[i]}"
            # DEBUG level so the lines reach the file but not the console.
            logger.debug(out_str)
        logger.debug('\n'*2)
def tensorboard_loss_log(name, loss_np_arr, writer):
    """
    Plot a per-iteration loss curve in TensorBoard. The log directory is
    the same as the training result save directory.
    """
    # One scalar point per element; the array index doubles as the step.
    for step, loss_value in enumerate(loss_np_arr):
        writer.add_scalar(name, loss_value, step)
def tensorboard_map_log(name, val_map_05, val_map, writer):
    """
    Plot mAP@0.5 and mAP@0.5:0.95 together on one TensorBoard chart,
    one point per epoch.
    """
    # Iterate over val_map; val_map_05 is indexed in lockstep.
    for step, map_value in enumerate(val_map):
        scalars = {
            'mAP@0.5': val_map_05[step],
            'mAP@0.5_0.95': map_value,
        }
        writer.add_scalars(name, scalars, step)
def create_log_csv(log_dir):
    """Create ``<log_dir>/results.csv`` containing only the header row.

    Column order here defines the file layout; ``csv_log`` must append
    rows in the same order.
    """
    cols = ['epoch', 'map', 'map_05']
    results_csv = pd.DataFrame(columns=cols)
    results_csv.to_csv(os.path.join(log_dir, 'results.csv'), index=False)

def csv_log(log_dir, stats, epoch):
    """Append one epoch's mAP results to ``<log_dir>/results.csv``.

    :param log_dir: Directory holding ``results.csv``.
    :param stats: Indexable metrics; ``stats[0]`` is treated as mAP@0.5
        and ``stats[1]`` as mAP@0.5:0.95, as in the original mapping —
        TODO confirm against the caller.
    :param epoch: Zero-based epoch index; stored one-based.
    """
    if epoch + 1 == 1:
        # First epoch: (re)create the file with its header.
        create_log_csv(log_dir)
    # BUG FIX: the row is appended with header=False, so the dict insertion
    # order must match the header written by create_log_csv
    # ('epoch', 'map', 'map_05'); previously 'map_05' and 'map' were
    # swapped, landing each value under the wrong column name.
    df = pd.DataFrame(
        {
            'epoch': [int(epoch + 1)],
            'map': [float(stats[1])],
            'map_05': [float(stats[0])],
        }
    )
    df.to_csv(
        os.path.join(log_dir, 'results.csv'),
        mode='a',
        index=False,
        header=False
    )