import sys, os
sys.path.append(os.path.abspath(os.getcwd()))
import math
from collections import defaultdict
from MOT_metrics import MOTMetrics
from Evaluator import Evaluator, run_metrics
import multiprocessing as mp
import pandas as pd


class Neophocaena_evaluator(Evaluator):
	"""Evaluator for the Neophocaena MOT benchmark.

	Builds one MOTMetrics job per sequence and runs them either on a
	multiprocessing pool or sequentially, collecting the per-sequence
	results and an overall MOTMetrics container.
	"""

	def __init__(self):
		super().__init__()

	def eval(self):
		"""Run the MOT metric evaluation for every sequence.

		Reads self.sequences / self.tsfiles / self.gtfiles (populated by the
		base Evaluator — TODO confirm) and stores per-sequence results on
		self.results.

		Returns:
			tuple: (self.results, self.Overall_Results).

		Raises:
			Exception: if any per-sequence evaluation fails; the original
				error is chained as the cause so it is not hidden.
		"""
		# One job description per (sequence, prediction file, ground-truth file).
		arguments = []
		for seq, res, gt in zip(self.sequences, self.tsfiles, self.gtfiles):
			arguments.append({
				"metricObject": MOTMetrics(seq),
				"args": {
					"gtDataDir": os.path.join(os.path.dirname(gt), seq),
					"sequence": str(seq),
					"pred_file": res,
					"gt_file": gt,
					"benchmark_name": self.benchmark_name,
				},
			})
		try:
			if self.MULTIPROCESSING:
				print("Evaluating on {} cpu cores".format(self.NR_CORES))
				# Context manager guarantees the pool is torn down even if
				# one of the get() calls raises (the original leaked it).
				with mp.Pool(self.NR_CORES) as pool:
					pending = [pool.apply_async(run_metrics, kwds=inp)
							   for inp in arguments]
					self.results = [job.get() for job in pending]
			else:
				self.results = [run_metrics(**inp) for inp in arguments]
			self.failed = False
		except Exception as err:
			self.failed = True
			# Chain the cause so the real failure is visible in the traceback
			# (the bare `except:` previously discarded it).
			raise Exception("<exc> MATLAB evaluation failed <!exc>") from err
		self.Overall_Results = MOTMetrics("OVERALL")
		return self.results, self.Overall_Results


if __name__ == "__main__":
	# 'evaluator', not 'eval' — the original name shadowed the builtin eval().
	evaluator = Neophocaena_evaluator()

	benchmark_name = "Neophocaena"
	gt_dir = "./results"
	res_dir = "./results"
	evaluator.run(
		benchmark_name=benchmark_name,
		gt_dir=gt_dir,
		mot_dir=res_dir,
		save_csv='NeophocaenaOverall.csv')
