#!/usr/bin/env python

import sys, os
import random
import time
import math
import json
from collections import defaultdict
from operator import itemgetter

from ..object.textual_features_concat import get_all_concats_TF_weighted
from ..metrics.term_freq import compute_FF_and_train_length, compute_FS, compute_internal_frequency_metrics
from ..metrics.cooccur import compute_cooccur_and_entropy_field
from ..personalized.inout import load_list, print_list

from ..recommender.sum_plus import sum_plus_score
from util import *


def sum_plus_descriptive_power_score(test_object, intags, fs, confidences, ftag, k_ant, k_cons, k_r, stabilities, alpha, metric_number):
    """Compute metrics and recommend tags.

    Combines a co-occurrence (association-rule) score with a descriptive-power
    score taken from the object's own textual features.

    Args:
        test_object: textual attributes of the object
        intags: list of input tags
        fs: Feature Spread of the textual attributes considered
        confidences: confidences of the association rules
        ftag: frequency with which a word appears as a tag
        k_ant, k_cons, k_r: tuning parameters forwarded to sum_plus_score
        stabilities: stability values forwarded to sum_plus_score
        alpha: weight of the co-occurrence score; the descriptive-power
            score is weighted by beta = 1 - alpha
        metric_number: value in [0, 3] selecting which descriptive-power
            metric (index into the per-term metric tuple) is used

    Returns:
        dict mapping candidate tags to their estimated relevance scores
    """
    beta = 1.0 - alpha

    # Co-occurrence-based candidates, normalized so the best score is alpha.
    candidates = sum_plus_score(intags, confidences, ftag, k_ant, k_cons, k_r, stabilities)
    if len(candidates) > 0:
        max_conf_score = max(candidates.values())
        if max_conf_score > 0:  # guard against ZeroDivisionError when all scores are 0
            for c in candidates:
                candidates[c] *= (alpha / max_conf_score)

    # Descriptive-power candidates drawn from the object's own text,
    # excluding the tags that were already given as input.
    metrics = compute_internal_frequency_metrics(test_object, fs)
    new_candidates = {}
    for t in metrics:
        if t not in intags:
            new_candidates[t] = metrics[t][metric_number]

    if len(new_candidates) > 0:
        maxv = max(new_candidates.values())
    else:
        maxv = 1.0
    if maxv == 0:  # all descriptive scores are 0; skip normalization safely
        maxv = 1.0
    for t in new_candidates:
        # A term may be new (absent from the co-occurrence candidates);
        # use .get() so we do not rely on candidates being a defaultdict,
        # which would otherwise raise KeyError on plain dicts.
        candidates[t] = candidates.get(t, 0.0) + (beta * new_candidates[t] / maxv)

    return candidates

def sum_plus_e_recommender_func(input_dict):
    train_filename = input_dict['train_filename']
    minsup = float(input_dict['min_support'])
    minconf = float(input_dict['min_confidence'])
    k_ant = float(input_dict['k_ant'])
    k_cons = float(input_dict['k_cons'])
    k_r = float(input_dict['k_r'])
    alpha = float(input_dict['alpha'])
    num_rec = int(input_dict['num_recommends'])
    metric_number = int(input_dict['metric_number'])
    confidence_status = int(input_dict['confidences_status'])
    confidences_filename = input_dict['confidences_filename']
    fs_file_status = int(input_dict['fs_file_status'])
    fs_filename = input_dict['fs_filename']
    ff_filename = input_dict['ff_filename']
    
    if metric_number < 0 or metric_number > 3:
        print >>sys.stderr, "Metric number must be between 0 and 3"
        sys.exit(-1)

    if (fs_file_status == 0):
    	#Computa Feature Spread de cada atributo textual da lista passada como parametro 
    	fs = compute_FS(train_filename, ["TITLE", "DESCRIPTION"])
	with open(fs_filename, 'w') as file:
		file.write(json.dumps(fs))
	
    if(confidence_status == 0):
    	#ff = Feature Frequency (frequencia de uma palavra em uma Textual Feature (ex. Tags)
    	(ff, n) = compute_FF_and_train_length(train_filename, ["TAG"])
	print "Step #1 ready!"
    	#computa as confiancas das regras de associacao obtidas do conjunto de objetos de treino
    	confidences = compute_cooccur_and_entropy_field(train_filename, ff["TAG"], minsup, minconf, "TAG")[0]
	print "Step #2 ready!"
	with open(ff_filename, 'w') as file:
		file.write(json.dumps(ff))
	with open(confidences_filename, 'w') as file:
		file.write(json.dumps(confidences))
    
    ff = defaultdict() 
    with open(ff_filename, 'r')  as file:
    	ff = json.loads(file.read())
    fs = defaultdict()
    with open(fs_filename, 'r') as file:
	fs = json.loads(file.read())
    confidences = defaultdict()
    with open(confidences_filename, 'r') as file:
    	confidences = json.loads(file.read())

    # Loops for each tuple of file(test_file, inputtag_file)
    output_file_list = []
    for tuple_filename in input_dict['test_inputtag_filename_list']:
    	with open(tuple_filename[0], 'r') as test_file:
		with open(tuple_filename[1], 'r') as input_tags:
    			test_objects = get_all_concats_TF_weighted(test_file)
  	  		stabilities = {}
   			
			output_filename = input_dict['output_dir'] + '/' + tuple_filename[0].split("test.")[1].strip()
			output_file_list.append(output_filename.strip())
    			with open(output_filename, 'w') as output_file:
    				for line in input_tags:
			        	split = load_list(line.strip(), " ")
				        intags = [x for x in split[2:]]
			        	rid = split[1]

				        test_object = test_objects[rid]
        
			        	#Gera tags candidatas e as ordena pelo ranking produzido
				        cand = sum_plus_descriptive_power_score(test_object, intags, fs, confidences, ff["TAG"], k_ant, k_cons, k_r, stabilities, alpha, metric_number)
			        	rec = get_top_ranked(cand, num_rec)
					output_file.write(rid)
				        for w in rec:
        					output_file.write(' ' + str(w))
					output_file.write('\n')
    return output_file_list
