import glob
import json
import os
import shutil
import operator
import sys
import argparse


MINOVERLAP = 0.5  # default IoU threshold (the value used by the PASCAL VOC challenge)


parser = argparse.ArgumentParser()
parser.add_argument('-na', '--no-animation', help="no animation is shown.", action="store_true")
parser.add_argument('-np', '--no-plot', help="no plot is shown.", action="store_true")
parser.add_argument('-q', '--quiet', help="minimalistic console output.", action="store_true")
# list of classes to be ignored
parser.add_argument('-i', '--ignore', nargs='+', type=str, help="ignore a list of classes.")
parser.add_argument('-o', '--output', default="results", type=str, help="output path name")
# list of classes with a class-specific IoU threshold
parser.add_argument('--set-class-iou', nargs='+', type=str, help="set IoU for a specific class.")
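# Example invocation (hypothetical script name and class names, adjust to your data):
#   python main.py --no-plot --ignore person --set-class-iou car 0.75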

args = parser.parse_args()

# if the --ignore flag was not used, replace None with an empty list
if args.ignore is None:
    args.ignore = []

specific_iou_flagged = False
if args.set_class_iou is not None:
    specific_iou_flagged = True

# if there are no images then no animation can be shown
img_path = 'images'
if os.path.exists(img_path):
    for dirpath, dirnames, files in os.walk(img_path):
        if not files:
            # no image files found in this directory
            args.no_animation = True
else:
    args.no_animation = True

# try to import OpenCV if the user didn't choose the option --no-animation
show_animation = False
if not args.no_animation:
    try:
        import cv2
        show_animation = True
    except ImportError:
        print("\"opencv-python\" not found, please install to visualize the results.")
        args.no_animation = True

# try to import Matplotlib if the user didn't choose the option --no-plot
draw_plot = False
if not args.no_plot:
    try:
        import matplotlib.pyplot as plt
        draw_plot = True
    except ImportError:
        print("\"matplotlib\" not found, please install it to get the resulting plots.")
        args.no_plot = True

"""
throw error and exit
"""
def error(msg):
    print(msg)
    sys.exit(1)  # non-zero exit status so callers can detect the failure

"""
check if the number is a float between 0.0 and 1.0
"""
def is_float_between_0_and_1(value):
    try:
        val = float(value)
        return 0.0 < val < 1.0
    except ValueError:
        return False
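# e.g. is_float_between_0_and_1("0.5") -> True, while "0", "1.0" and "abc"
# all return False (the bounds are exclusive)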


"""
Calculate the AP given the recall and precision array
1st) We compute a version of the measured precision/recall curve with
     precision monotonically decreasing
2nd) We compute the AP as the area under this curve by numerical integration.
"""
def voc_ap(rec, prec):
    """
    --- Official matlab code VOC2012---
    mrec=[0 ; rec ; 1];
    mpre=[0 ; prec ; 0];
    for i=numel(mpre)-1:-1:1
        mpre(i)=max(mpre(i),mpre(i+1));
    end
    i=find(mrec(2:end)~=mrec(1:end-1))+1;
    ap=sum((mrec(i)-mrec(i-1)).*mpre(i));
    """
    rec.insert(0, 0.0)
    rec.append(1.0)
    mrec = rec[:]
    prec.insert(0, 0.0)
    prec.append(0.0)
    mpre = prec[:]
    """
    This part makes the precision monotonically decreasing
    (goes from the end to the beginning)
    matlab: for i=numel(mpre)-1:-1:1
                mpre(i)=max(mpre(i),mpre(i+1));
    """
    for i in range(len(mpre)-2, -1, -1):
        mpre[i] = max(mpre[i], mpre[i+1])
    """
    This part creates a list of indexes where the recall changes
    matlab: i=find(mrec(2:end)~=mrec(1:end-1))+1;
    """
    i_list = []
    for i in range(1, len(mrec)):
        if mrec[i] != mrec[i-1]:
            i_list.append(i)
    """
    The Average Precision (AP) is the area under the curve
    (numerical integration)
    matlab: ap=sum((mrec(i)-mrec(i-1)).*mpre(i));
    """
    ap = 0.0
    for i in i_list:
        ap += ((mrec[i]-mrec[i-1])*mpre[i])
    return ap, mrec, mpre
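
# Worked example (a sanity check, not part of the evaluation flow): for
# rec = [0.5, 1.0] and prec = [1.0, 0.5], voc_ap pads these to
# mrec = [0, 0.5, 1.0, 1.0] and mpre = [0, 1.0, 0.5, 0], makes mpre
# monotonically decreasing from the end ([1.0, 1.0, 0.5, 0.0]) and sums the
# rectangles where recall changes: ap = 0.5*1.0 + 0.5*0.5 = 0.75.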


"""
Convert the lines of a file to a list
"""
def file_lines_to_list(path):
    with open(path) as f:
        content = f.readlines()
    # remove whitespace characters like `\n` at the end of each line
    content = [x.strip() for x in content]
    return content


"""
Draws text in image
"""
def draw_text_in_image(img, text, pos, color, line_width):
    font = cv2.FONT_HERSHEY_PLAIN
    fontScale = 1
    lineType = 1
    bottomLeftCornerOfText = pos
    cv2.putText(img, text,
                bottomLeftCornerOfText,
                font,
                fontScale,
                color,
                lineType)
    text_width, _ = cv2.getTextSize(text, font, fontScale, lineType)[0]
    return img, (line_width + text_width)
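
# Note: cv2.putText's seventh positional argument is the stroke thickness, so
# the value named lineType above is actually passed as thickness=1 and the
# line type itself is left at OpenCV's default; the rendering works either way.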


"""
Plot - adjust axes
"""
def adjust_axes(r, t, fig, axes):
    # get text width in inches for re-scaling
    bb = t.get_window_extent(renderer=r)
    text_width_inches = bb.width / fig.dpi
    # get axis width in inches
    current_fig_width = fig.get_figwidth()
    new_fig_width = current_fig_width + text_width_inches
    proportion = new_fig_width / current_fig_width
    # set new x limit proportional to the figure growth
    x_lim = axes.get_xlim()
    axes.set_xlim([x_lim[0], x_lim[1]*proportion])


"""
Draw plot using Matplotlib
"""
def draw_plot_func(dictionary, n_classes, window_title, plot_title, x_label, output_path, to_show, plot_color, true_p_bar):
    # sort the dictionary by value (e.g. by the number of objects per class)
    sorted_dic_by_value = sorted(dictionary.items(), key=operator.itemgetter(1))
    # unpack the sorted (key, value) tuples into two lists
    sorted_keys, sorted_values = zip(*sorted_dic_by_value)
    if true_p_bar != "":
        """
        Special case to draw in (green=true predictions) & (red=false predictions)
        """
        fp_sorted = []
        tp_sorted = []
        for key in sorted_keys:
            fp_sorted.append(dictionary[key] - true_p_bar[key])
            tp_sorted.append(true_p_bar[key])
        plt.barh(range(n_classes), fp_sorted, align='center', color='crimson', label='False Predictions')
        plt.barh(range(n_classes), tp_sorted, align='center', color='forestgreen', label='True Predictions', left=fp_sorted)
        # add legend at the bottom of the chart
        plt.legend(loc='lower right')
        """
        Write number on side of bar
        """
        fig = plt.gcf()  # gcf - get current figure
        axes = plt.gca()
        r = fig.canvas.get_renderer()
        for i, val in enumerate(sorted_values):
            fp_val = fp_sorted[i]
            tp_val = tp_sorted[i]
            fp_str_val = " " + str(fp_val)
            tp_str_val = fp_str_val + " " + str(tp_val)
            # trick to paint in two colors with an offset:
            # first paint the full label, then repaint the first number on top
            t = plt.text(val, i, tp_str_val, color='forestgreen', va='center', fontweight='bold')
            plt.text(val, i, fp_str_val, color='crimson', va='center', fontweight='bold')
            if i == (len(sorted_values)-1):  # largest bar
                adjust_axes(r, t, fig, axes)
    else:
        plt.barh(range(n_classes), sorted_values, color=plot_color)
        """
        Write number on side of bar
        """
        fig = plt.gcf()  # gcf - get current figure
        axes = plt.gca()
        r = fig.canvas.get_renderer()
        for i, val in enumerate(sorted_values):
            str_val = " " + str(val)  # add a space before
            if val < 1.0:
                str_val = " {0:.2f}".format(val)
            t = plt.text(val, i, str_val, color=plot_color, va='center', fontweight='bold')
            # re-scale axes to show the number inside the figure
            if i == (len(sorted_values)-1):  # largest bar
                adjust_axes(r, t, fig, axes)
    # set window title (via the figure manager; fig.canvas.set_window_title
    # was deprecated and later removed in Matplotlib)
    fig.canvas.manager.set_window_title(window_title)
    # write classes in the y axis
    tick_font_size = 12
    plt.yticks(range(n_classes), sorted_keys, fontsize=tick_font_size)
    """
    Re-scale height accordingly
    """
    init_height = fig.get_figheight()
    # compute the required height in points and inches
    dpi = fig.dpi
    height_pt = n_classes * (tick_font_size * 1.4)  # 1.4 adds some spacing between rows
    height_in = height_pt / dpi
    # compute the required figure height
    top_margin = 0.15     # in percentage of the figure height
    bottom_margin = 0.05  # in percentage of the figure height
    figure_height = height_in / (1 - top_margin - bottom_margin)
    # set new height if the default one is too small
    if figure_height > init_height:
        fig.set_figheight(figure_height)
    # set plot title
    plt.title(plot_title, fontsize=14)
    # set axis title
    plt.xlabel(x_label, fontsize='large')
    # adjust size of window
    fig.tight_layout()
    # save the plot
    fig.savefig(output_path)
    # show image
    if to_show:
        plt.show()
    # close the plot
    plt.close()
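
# Note: draw_plot_func is reused below for three charts: the ground-truth
# object counts, the predicted object counts (split into TP/FP via
# true_p_bar), and the final per-class AP bar chart.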


"""
Create a "tmp_files/" and "results/" directory
"""
tmp_files_path = "tmp_files"
if not os.path.exists(tmp_files_path):  # if it doesn't exist already
    os.makedirs(tmp_files_path)
results_files_path = args.output
if os.path.exists(results_files_path):
    # reset the results directory (it is deleted and recreated on every run)
    shutil.rmtree(results_files_path)
os.makedirs(results_files_path)
if draw_plot:
    os.makedirs(results_files_path + "/classes")
if show_animation:
    os.makedirs(results_files_path + "/images")
    os.makedirs(results_files_path + "/images/single_predictions")


"""
Ground-Truth
Load each of the ground-truth files into a temporary ".json" file.
Create a list of all the class names present in the ground-truth (gt_classes).
"""
# get a list with the ground-truth files
ground_truth_files_list = glob.glob('ground-truth/*.txt')
if len(ground_truth_files_list) == 0:
    error("Error: No ground-truth files found!")
ground_truth_files_list.sort()
# dictionary with a counter of objects per class
gt_counter_per_class = {}

for txt_file in ground_truth_files_list:
    file_id = txt_file.split(".txt", 1)[0]
    file_id = os.path.basename(os.path.normpath(file_id))
    # check if there is a correspondent file of predicted objects
    if not os.path.exists('predicted/' + file_id + ".txt"):
        error_msg = "Error. File not found: predicted/" + file_id + ".txt\n"
        error_msg += "(You can avoid this error message by running extra/intersect-gt-and-pred.py)"
        error(error_msg)
    lines_list = file_lines_to_list(txt_file)
    # create the ground-truth dictionary
    bounding_boxes = []
    is_difficult = False
    for line in lines_list:
        try:
            if "difficult" in line:
                class_name, left, top, right, bottom, _difficult = line.split()
                is_difficult = True
            else:
                class_name, left, top, right, bottom = line.split()
        except ValueError:
            error_msg = "Error: File " + txt_file + " in the wrong format.\n"
            error_msg += " Expected: <class_name> <left> <top> <right> <bottom> ['difficult']\n"
            error_msg += " Received: " + line
            error_msg += "\n\nIf you have a <class_name> with spaces between words you should remove them\n"
            error_msg += "by running the script \"remove_space.py\" or \"rename_class.py\" in the \"extra/\" folder."
            error(error_msg)
        # check if class is in the ignore list, if yes skip
        if class_name in args.ignore:
            continue
        bbox = left + " " + top + " " + right + " " + bottom
        if is_difficult:
            bounding_boxes.append({"class_name": class_name, "bbox": bbox, "used": False, "difficult": True})
            is_difficult = False
        else:
            bounding_boxes.append({"class_name": class_name, "bbox": bbox, "used": False})
            # count that object ("difficult" objects are ignored during
            # matching, so they must not inflate the recall denominator)
            if class_name in gt_counter_per_class:
                gt_counter_per_class[class_name] += 1
            else:
                # if class didn't exist yet
                gt_counter_per_class[class_name] = 1
    # dump bounding_boxes into a ".json" file
    with open(tmp_files_path + "/" + file_id + "_ground_truth.json", 'w') as outfile:
        json.dump(bounding_boxes, outfile)

gt_classes = list(gt_counter_per_class.keys())
# sort the classes alphabetically
gt_classes = sorted(gt_classes)
n_classes = len(gt_classes)
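
# For reference, each ground-truth file lists one object per line in the form
# <class_name> <left> <top> <right> <bottom> ['difficult'], e.g. a hypothetical
# "ground-truth/image_1.txt":
#   tvmonitor 2 10 173 238
#   book 439 157 556 241 difficult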


"""
Check format of the flag --set-class-iou (if used)
e.g. check if class exists
"""
if specific_iou_flagged:
    n_args = len(args.set_class_iou)
    error_msg = \
        '\n --set-class-iou [class_1] [IoU_1] [class_2] [IoU_2] [...]'
    if n_args % 2 != 0:
        error('Error, missing arguments. Flag usage:' + error_msg)
    # classes are the even positions: [class_1] [class_2] ...
    specific_iou_classes = args.set_class_iou[::2]
    # IoU values are the odd positions: [IoU_1] [IoU_2] ...
    iou_list = args.set_class_iou[1::2]
    if len(specific_iou_classes) != len(iou_list):
        error('Error, missing arguments. Flag usage:' + error_msg)
    for tmp_class in specific_iou_classes:
        if tmp_class not in gt_classes:
            error('Error, unknown class \"' + tmp_class + '\". Flag usage:' + error_msg)
    for num in iou_list:
        if not is_float_between_0_and_1(num):
            error('Error, IoU must be between 0.0 and 1.0. Flag usage:' + error_msg)
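
# Example: --set-class-iou person 0.7 car 0.5 (hypothetical class names) would
# evaluate "person" at IoU 0.7, "car" at IoU 0.5, and every other class at
# MINOVERLAP.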


"""
Predicted
Load each of the predicted files into a temporary ".json" file.
"""
# get a list with the predicted files
predicted_files_list = glob.glob('predicted/*.txt')
predicted_files_list.sort()

for class_index, class_name in enumerate(gt_classes):
    bounding_boxes = []
    for txt_file in predicted_files_list:
        file_id = txt_file.split(".txt", 1)[0]
        file_id = os.path.basename(os.path.normpath(file_id))
        # on the first class, check if all the corresponding ground-truth files exist
        if class_index == 0:
            if not os.path.exists('ground-truth/' + file_id + ".txt"):
                error_msg = "Error. File not found: ground-truth/" + file_id + ".txt\n"
                error_msg += "(You can avoid this error message by running extra/intersect-gt-and-pred.py)"
                error(error_msg)
        lines = file_lines_to_list(txt_file)
        for line in lines:
            try:
                tmp_class_name, confidence, left, top, right, bottom = line.split()
            except ValueError:
                error_msg = "Error: File " + txt_file + " in the wrong format.\n"
                error_msg += " Expected: <class_name> <confidence> <left> <top> <right> <bottom>\n"
                error_msg += " Received: " + line
                error(error_msg)
            if tmp_class_name == class_name:
                bbox = left + " " + top + " " + right + " " + bottom
                bounding_boxes.append({"confidence": confidence, "file_id": file_id, "bbox": bbox})
    # sort detections by decreasing confidence (the AP computation relies on this ranking)
    bounding_boxes.sort(key=lambda x: float(x['confidence']), reverse=True)
    with open(tmp_files_path + "/" + class_name + "_predictions.json", 'w') as outfile:
        json.dump(bounding_boxes, outfile)
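
# For reference, each prediction file lists one detection per line in the form
# <class_name> <confidence> <left> <top> <right> <bottom>, e.g. a hypothetical
# "predicted/image_1.txt":
#   tvmonitor 0.471781 0 13 174 244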


"""
Calculate the AP for each class
"""
sum_AP = 0.0
ap_dictionary = {}
# open file to store the results
with open(results_files_path + "/results.txt", 'w') as results_file:
    results_file.write("# AP and precision/recall per class\n")
    count_true_positives = {}
    for class_index, class_name in enumerate(gt_classes):
        count_true_positives[class_name] = 0
        """
        Load predictions of that class
        """
        predictions_file = tmp_files_path + "/" + class_name + "_predictions.json"
        with open(predictions_file) as f:
            predictions_data = json.load(f)
        """
        Assign predictions to ground truth objects
        """
        nd = len(predictions_data)
        tp = [0] * nd  # creates an array of zeros of size nd
        fp = [0] * nd
        for idx, prediction in enumerate(predictions_data):
            file_id = prediction["file_id"]
            if show_animation:
                # find the ground-truth image that matches this file_id
                ground_truth_img = glob.glob1(img_path, file_id + ".*")
                if len(ground_truth_img) == 0:
                    error("Error. Image not found with id: " + file_id)
                elif len(ground_truth_img) > 1:
                    error("Error. Multiple images found with id: " + file_id)
                else:
                    # load image
                    img = cv2.imread(img_path + "/" + ground_truth_img[0])
                    # load image with the draws of multiple detections
                    img_cumulative_path = results_files_path + "/images/" + ground_truth_img[0]
                    if os.path.isfile(img_cumulative_path):
                        img_cumulative = cv2.imread(img_cumulative_path)
                    else:
                        img_cumulative = img.copy()
                    # add a bottom border to the image (to draw the status text on)
                    bottom_border = 60
                    BLACK = [0, 0, 0]
                    img = cv2.copyMakeBorder(img, 0, bottom_border, 0, 0, cv2.BORDER_CONSTANT, value=BLACK)
            # assign the prediction to the ground-truth object (if any) with
            # the highest IoU; open the ground-truth file with that file_id
            gt_file = tmp_files_path + "/" + file_id + "_ground_truth.json"
            with open(gt_file) as f:
                ground_truth_data = json.load(f)
            ovmax = -1
            gt_match = -1
            # load prediction bounding-box
            bb = [float(x) for x in prediction["bbox"].split()]
            for obj in ground_truth_data:
                # look for a class_name match
                if obj["class_name"] == class_name:
                    bbgt = [float(x) for x in obj["bbox"].split()]
                    bi = [max(bb[0], bbgt[0]), max(bb[1], bbgt[1]), min(bb[2], bbgt[2]), min(bb[3], bbgt[3])]
                    # intersection width/height ("+ 1" because coordinates are
                    # treated as inclusive pixel indices, as in the VOC devkit)
                    iw = bi[2] - bi[0] + 1
                    ih = bi[3] - bi[1] + 1
                    if iw > 0 and ih > 0:
                        # compute overlap (IoU) = area of intersection / area of union
                        ua = (bb[2] - bb[0] + 1) * (bb[3] - bb[1] + 1) + (bbgt[2] - bbgt[0]
                                + 1) * (bbgt[3] - bbgt[1] + 1) - iw * ih
                        ov = iw * ih / ua
                        if ov > ovmax:
                            ovmax = ov
                            gt_match = obj

            # assign the prediction as true positive or false positive
            if show_animation:
                status = "NO MATCH FOUND!"  # status shown in the animation
            # set minimum overlap
            min_overlap = MINOVERLAP
            if specific_iou_flagged:
                if class_name in specific_iou_classes:
                    index = specific_iou_classes.index(class_name)
                    min_overlap = float(iou_list[index])
            if ovmax >= min_overlap:
                if "difficult" not in gt_match:
                    if not bool(gt_match["used"]):
                        # true positive
                        tp[idx] = 1
                        gt_match["used"] = True
                        count_true_positives[class_name] += 1
                        # update the ".json" file so this ground-truth box cannot be matched again
                        with open(gt_file, 'w') as f:
                            f.write(json.dumps(ground_truth_data))
                        if show_animation:
                            status = "MATCH!"
                    else:
                        # false positive (multiple detections of the same object)
                        fp[idx] = 1
                        if show_animation:
                            status = "REPEATED MATCH!"
            else:
                # false positive (overlap below the IoU threshold)
                fp[idx] = 1
                if ovmax > 0:
                    status = "INSUFFICIENT OVERLAP"
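            # To summarize: ovmax >= min_overlap on an unused ground-truth box
            # gives a TP; on an already-used box it gives a FP (duplicate
            # detection); below min_overlap it gives a FP (insufficient
            # overlap); matches to "difficult" boxes count as neither.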

            """
            Draw image to show animation
            """
            if show_animation:
                height, width = img.shape[:2]
                # colors (OpenCV works with BGR)
                white = (255, 255, 255)
                light_blue = (255, 200, 100)
                green = (0, 255, 0)
                light_red = (30, 30, 255)
                # 1st line
                margin = 10
                v_pos = int(height - margin - (bottom_border / 2))
                text = "Image: " + ground_truth_img[0] + " "
                img, line_width = draw_text_in_image(img, text, (margin, v_pos), white, 0)
                text = "Class [" + str(class_index) + "/" + str(n_classes) + "]: " + class_name + " "
                img, line_width = draw_text_in_image(img, text, (margin + line_width, v_pos), light_blue, line_width)
                if ovmax != -1:
                    color = light_red
                    if status == "INSUFFICIENT OVERLAP":
                        text = "IoU: {0:.2f}% ".format(ovmax*100) + "< {0:.2f}% ".format(min_overlap*100)
                    else:
                        text = "IoU: {0:.2f}% ".format(ovmax*100) + ">= {0:.2f}% ".format(min_overlap*100)
                        color = green
                    img, _ = draw_text_in_image(img, text, (margin + line_width, v_pos), color, line_width)
                # 2nd line
                v_pos += int(bottom_border / 2)
                rank_pos = str(idx+1)  # rank position (idx starts at 0)
                text = "Prediction #rank: " + rank_pos + " confidence: {0:.2f}% ".format(float(prediction["confidence"])*100)
                img, line_width = draw_text_in_image(img, text, (margin, v_pos), white, 0)
                color = light_red
                if status == "MATCH!":
                    color = green
                text = "Result: " + status + " "
                img, line_width = draw_text_in_image(img, text, (margin + line_width, v_pos), color, line_width)

                font = cv2.FONT_HERSHEY_SIMPLEX
                if ovmax > 0:  # if there is an intersection between the boxes
                    # draw the matched ground-truth box in light blue
                    bbgt = [int(x) for x in gt_match["bbox"].split()]
                    cv2.rectangle(img, (bbgt[0], bbgt[1]), (bbgt[2], bbgt[3]), light_blue, 2)
                    cv2.rectangle(img_cumulative, (bbgt[0], bbgt[1]), (bbgt[2], bbgt[3]), light_blue, 2)
                    cv2.putText(img_cumulative, class_name, (bbgt[0], bbgt[1] - 5), font, 0.6, light_blue, 1, cv2.LINE_AA)
                # draw the prediction box (green if it is a match, red otherwise)
                bb = [int(i) for i in bb]
                cv2.rectangle(img, (bb[0], bb[1]), (bb[2], bb[3]), color, 2)
                cv2.rectangle(img_cumulative, (bb[0], bb[1]), (bb[2], bb[3]), color, 2)
                cv2.putText(img_cumulative, class_name, (bb[0], bb[1] - 5), font, 0.6, color, 1, cv2.LINE_AA)
                # show the image for 20 ms
                cv2.imshow("Animation", img)
                cv2.waitKey(20)
                # save the image with this single prediction drawn on it
                output_img_path = results_files_path + "/images/single_predictions/" + class_name + "_prediction" + str(idx) + ".jpg"
                cv2.imwrite(output_img_path, img)
                # save the image with all the objects drawn on it
                cv2.imwrite(img_cumulative_path, img_cumulative)

        # turn the per-detection tp/fp flags into cumulative sums, so that
        # entry i describes the top-(i+1) most confident detections
        cumsum = 0
        for idx, val in enumerate(fp):
            fp[idx] += cumsum
            cumsum += val
        cumsum = 0
        for idx, val in enumerate(tp):
            tp[idx] += cumsum
            cumsum += val
        # recall = cumulative TP / number of ground-truth objects of this class
        rec = tp[:]
        for idx, val in enumerate(tp):
            rec[idx] = float(tp[idx]) / gt_counter_per_class[class_name]
        # precision = cumulative TP / (cumulative FP + cumulative TP)
        prec = tp[:]
        for idx, val in enumerate(tp):
            prec[idx] = float(tp[idx]) / (fp[idx] + tp[idx])

        ap, mrec, mprec = voc_ap(rec, prec)
        sum_AP += ap
        text = "{0:.2f}%".format(ap*100) + " = " + class_name + " AP "
        """
        Write to results.txt
        """
        rounded_prec = ['%.2f' % elem for elem in prec]
        rounded_rec = ['%.2f' % elem for elem in rec]
        results_file.write(text + "\n Precision: " + str(rounded_prec) + "\n Recall: " + str(rounded_rec) + "\n\n")
        if not args.quiet:
            print(text)
        ap_dictionary[class_name] = ap

        """
        Draw plot
        """
        if draw_plot:
            plt.plot(rec, prec, '-o')
            # add a new penultimate point (mrec[-2], 0.0) to the list,
            # since the last line segment (and respective area) do not affect the AP value
            area_under_curve_x = mrec[:-1] + [mrec[-2]] + [mrec[-1]]
            area_under_curve_y = mprec[:-1] + [0.0] + [mprec[-1]]
            plt.fill_between(area_under_curve_x, 0, area_under_curve_y, alpha=0.2, edgecolor='r')
            # set window title (via the figure manager, as above)
            fig = plt.gcf()  # gcf - get current figure
            fig.canvas.manager.set_window_title('AP ' + class_name)
            # set plot title
            plt.title('class: ' + text)
            # set axis titles
            plt.xlabel('Recall')
            plt.ylabel('Precision')
            # optional - set axes
            axes = plt.gca()  # gca - get current axes
            axes.set_xlim([0.0, 1.0])
            axes.set_ylim([0.0, 1.05])  # .05 to give some extra space
            # save the plot
            fig.savefig(results_files_path + "/classes/" + class_name + ".png")
            plt.cla()  # clear axes for the next plot

    if show_animation:
        cv2.destroyAllWindows()

    results_file.write("\n# mAP of all classes\n")
    mAP = sum_AP / n_classes
    text = "mAP = {0:.2f}%".format(mAP*100)
    results_file.write(text + "\n")
    print(text)

# remove the temporary files directory
shutil.rmtree(tmp_files_path)

"""
Count total of Predictions
"""
pred_counter_per_class = {}
for txt_file in predicted_files_list:
    lines_list = file_lines_to_list(txt_file)
    for line in lines_list:
        class_name = line.split()[0]
        # check if class is in the ignore list, if yes skip
        if class_name in args.ignore:
            continue
        # count that object
        if class_name in pred_counter_per_class:
            pred_counter_per_class[class_name] += 1
        else:
            # if class didn't exist yet
            pred_counter_per_class[class_name] = 1

pred_classes = list(pred_counter_per_class.keys())


"""
Plot the total number of occurrences of each class in the ground-truth
"""
if draw_plot:
    window_title = "Ground-Truth Info"
    plot_title = "Ground-Truth\n"
    plot_title += "(" + str(len(ground_truth_files_list)) + " files and " + str(n_classes) + " classes)"
    x_label = "Number of objects per class"
    output_path = results_files_path + "/Ground-Truth Info.png"
    to_show = False
    plot_color = 'forestgreen'
    draw_plot_func(
        gt_counter_per_class,
        n_classes,
        window_title,
        plot_title,
        x_label,
        output_path,
        to_show,
        plot_color,
        '',
        )

"""
Write number of ground-truth objects per class to results.txt
"""
with open(results_files_path + "/results.txt", 'a') as results_file:
    results_file.write("\n# Number of ground-truth objects per class\n")
    for class_name in sorted(gt_counter_per_class):
        results_file.write(class_name + ": " + str(gt_counter_per_class[class_name]) + "\n")

"""
Finish counting true positives
"""
for class_name in pred_classes:
    # if a class appears in the predictions but not in the ground-truth
    # then there are no true positives in that class
    if class_name not in gt_classes:
        count_true_positives[class_name] = 0


"""
Plot the total number of occurrences of each class in the "predicted" folder
"""
if draw_plot:
    window_title = "Predicted Objects Info"
    # Plot title
    plot_title = "Predicted Objects\n"
    plot_title += "(" + str(len(predicted_files_list)) + " files and "
    count_non_zero_values_in_dictionary = sum(int(x) > 0 for x in list(pred_counter_per_class.values()))
    plot_title += str(count_non_zero_values_in_dictionary) + " detected classes)"
    x_label = "Number of objects per class"
    output_path = results_files_path + "/Predicted Objects Info.png"
    to_show = False
    plot_color = 'forestgreen'
    true_p_bar = count_true_positives
    draw_plot_func(
        pred_counter_per_class,
        len(pred_counter_per_class),
        window_title,
        plot_title,
        x_label,
        output_path,
        to_show,
        plot_color,
        true_p_bar
        )

"""
Write number of predicted objects per class to results.txt
"""
with open(results_files_path + "/results.txt", 'a') as results_file:
    results_file.write("\n# Number of predicted objects per class\n")
    for class_name in sorted(pred_classes):
        n_pred = pred_counter_per_class[class_name]
        text = class_name + ": " + str(n_pred)
        text += " (tp:" + str(count_true_positives[class_name])
        text += ", fp:" + str(n_pred - count_true_positives[class_name]) + ")\n"
        results_file.write(text)

"""
Draw mAP plot (Show AP's of all classes in decreasing order)
"""
if draw_plot:
    window_title = "mAP"
    plot_title = "mAP = {0:.2f}%".format(mAP*100)
    x_label = "Average Precision"
    output_path = results_files_path + "/mAP.png"
    to_show = True
    plot_color = 'royalblue'
    draw_plot_func(
        ap_dictionary,
        n_classes,
        window_title,
        plot_title,
        x_label,
        output_path,
        to_show,
        plot_color,
        ""
        )