#!/usr/bin/env python

# Program substituting itself to the scoring program to test python configuration
# Isabelle Guyon, ChaLearn, September 2014

# ALL INFORMATION, SOFTWARE, DOCUMENTATION, AND DATA ARE PROVIDED "AS-IS". 
# ISABELLE GUYON, CHALEARN, AND/OR OTHER ORGANIZERS OR CODE AUTHORS DISCLAIM
# ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ANY PARTICULAR PURPOSE, AND THE
# WARRANTY OF NON-INFRINGEMENT OF ANY THIRD PARTY'S INTELLECTUAL PROPERTY RIGHTS. 
# IN NO EVENT SHALL ISABELLE GUYON AND/OR OTHER ORGANIZERS BE LIABLE FOR ANY SPECIAL, 
# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF SOFTWARE, DOCUMENTS, MATERIALS, 
# PUBLICATIONS, OR INFORMATION MADE AVAILABLE FOR THE CHALLENGE. 

import os
from sys import argv
import data_io 
from glob import glob
# from numpy import genfromtxt
import json
import yaml


# Platform path separator: backslash on Windows ("nt"), forward slash elsewhere.
filesep = '\\' if os.name == 'nt' else '/'

def recall(actual, predicted, k):
    """Recall@k for a single ground-truth item.

    Args:
        actual: the single relevant item for this query.
        predicted: ranked candidate list, best first.
        k: cutoff rank; only the first k predictions count.

    Returns:
        1.0 if `actual` appears among the top-k predictions, else 0.0
        (the relevant set always has exactly one element).
    """
    relevant = set([actual])
    retrieved = set(predicted[:k])
    hits = len(relevant & retrieved)
    return hits / float(len(relevant))

def compute_score(solution, prediction):
	"""Compute mean recall@10 and recall@50 over aligned record lists.

	Parameters
	----------
	solution : list of dict
		Each entry carries "candidate" and "target" keys.
	prediction : list of dict
		Each entry carries "candidate" and "ranking" keys; entry i must
		describe the same candidate as solution[i].

	Returns
	-------
	tuple of (float, float)
		(mean recall@10, mean recall@50) over all records.

	Raises
	------
	ValueError
		If `solution` is empty or a candidate pair is misaligned.
	IndexError
		If `prediction` is shorter than `solution` (unchanged behavior).
	"""
	n = len(solution)
	if n == 0:
		# Fail loudly instead of the bare ZeroDivisionError the old code hit.
		raise ValueError("empty solution list: nothing to score")

	scores_r_10 = []
	scores_r_50 = []
	for i in range(n):
		# Explicit check instead of `assert`: asserts are stripped under
		# `python -O`, which would silently skip this data validation.
		if solution[i]["candidate"] != prediction[i]["candidate"]:
			raise ValueError("misaligned candidates at index %d: %r != %r"
				% (i, solution[i]["candidate"], prediction[i]["candidate"]))

		scores_r_10.append(recall(solution[i]["target"], prediction[i]["ranking"], 10))
		scores_r_50.append(recall(solution[i]["target"], prediction[i]["ranking"], 50))

	return sum(scores_r_10) / n, sum(scores_r_50) / n

if __name__=="__main__":

	# Directories holding ref/*.solution.json and res/*.predict.json.
	# Command-line arguments override; without arguments the historical
	# hard-coded defaults are kept, so existing invocations still work.
	default_dir = '/home/leslie/project/fashion-iq/data/val'
	input_dir = argv[1] if len(argv) > 1 else default_dir
	output_dir = argv[2] if len(argv) > 2 else default_dir

	# Get all the solution files from the solution directory.
	solution_names = glob(os.path.join(input_dir, 'ref', '*.solution.json'))
	print("solution_names:", solution_names)

	# Collects every successfully computed r@10/r@50 for the final average.
	avg_score = []

	# 'with' guarantees the score file is closed even if scoring raises
	# (the original used a bare open()/close() pair).
	with open(os.path.join(output_dir, 'scores.txt'), 'wb') as score_file:
		# Loop over solution files and look for a matching '.predict.json'.
		for solution_file in solution_names:
			# Dataset name = file name minus directory and the
			# '.solution.json' suffix (replaces the brittle reverse-slice).
			basename = os.path.basename(solution_file)[:-len('.solution.json')]

			predict_file = os.path.join(input_dir, 'res', basename + '.predict.json')
			print("predict_file:", predict_file)
			print("solution_file:", solution_file)

			if not os.path.isfile(predict_file):
				# BUG FIX: the original never assigned r_10/r_50 on this
				# path, causing a NameError on the first missing prediction
				# and stale scores on later ones. Record NaN instead so the
				# report shows the dataset was not scored.
				print('No prediction file for ' + basename)
				r_10 = r_50 = float('NaN')
			else:
				# Read the solution and prediction values (files closed promptly).
				with open(solution_file) as f:
					solution = json.load(f)
				with open(predict_file) as f:
					prediction = json.load(f)
				print(len(solution), len(prediction))
				r_10, r_50 = compute_score(solution, prediction)
				# Only successfully scored datasets contribute to the average.
				avg_score.append(r_10)
				avg_score.append(r_50)

			# Write this dataset's scores to the output file.
			print(r_10)
			print(r_50)
			score_file.write((basename + "_r_10_score: %0.6f\n" % r_10).encode())
			score_file.write((basename + "_r_50_score: %0.6f\n" % r_50).encode())

		# Guard against an empty run (no solutions / no predictions found),
		# which previously crashed with ZeroDivisionError.
		overall = sum(avg_score) / len(avg_score) if avg_score else float('NaN')
		score_file.write(("avg_score: %0.6f\n" % overall).encode())

	# Lots of debug stuff
	data_io.show_io(input_dir, output_dir)
	data_io.show_version()

	exit(0)




