# -*- coding: utf-8 -*-
##
## This file is part of CDS Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 CERN.
##
## CDS Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## CDS Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.

__revision__ = "$Id$"

import math
import os
import time
import datetime
import random
import tempfile
from marshal import loads
from zlib import decompress

from invenio.config import CFG_SITE_URL, CFG_SITE_LANG
from invenio.dbquery import run_sql, serialize_via_marshal, deserialize_via_marshal
from invenio.messages import gettext_set_language
from invenio.bibrank_grapher import create_temporary_image, write_coordinates_in_tmp_file, remove_old_img
from invenio.bibrank_citation_searcher import calculate_cited_by_list

def dict_2_value_frequence_table(dictionary):
    """
    Build a value -> frequency table from a (recordID -> value) dictionary.
    Input:
        - dictionary : contains all pairs (recordID - value)
    Output:
        - value_frequence_table : maps each distinct value to the number of
          records carrying it
    """
    value_frequence_table = {}
    # Only the values matter here; the record IDs (keys) are ignored.
    for value in dictionary.values():
        value_frequence_table[value] = value_frequence_table.get(value, 0) + 1

    # debug output of the accumulated frequencies
    print("value_frequence_table :")
    for key, value in value_frequence_table.items():
        print(str(key) + "-->>" + str(value))

    return value_frequence_table

def create_value_frequence_table(attribute_file_name, level_dir, depth,
                                 directory="/home/lebaoanh/Desktop/data/cdswebdev.cern.ch/admin/bibrank/data/"):
    """
    Read recordIDs from all queries and their corresponding values from the
    attribute file, then create the value_frequence_table.
    Input:
        - attribute_file_name : file containing all scores of this attribute,
          one "<recID> | <score>" pair per line
        - level_dir : directory containing all queries of one of the types
          (recalls, displays, seen)
        - depth : only the first "depth" recordIDs are read from each query
        - directory : base data directory (defaults to the historical
          hard-coded path, so existing callers are unaffected)
        For example: create_value_frequence_table("008.dat", "recalls/", 200)
        accumulates the first 200 recordIDs of each query at level "recalls".
    Output:
        - value_frequence_table : cumulated frequences of value
        - data_dict : contains all pairs : recID - value
    """
    data_dict = {}

    # Attribute scores, keyed by record ID.
    attribute_dict = {}
    data_file = open(directory + attribute_file_name, 'r')
    try:
        for line in data_file:
            splited_line = line.split(" | ")
            attribute_dict[int(splited_line[0])] = float(splited_line[1])
    finally:
        data_file.close()

    # One file per query at this level; each file holds a single bracketed,
    # space-separated list of record IDs.
    for file_name in os.listdir(directory + level_dir):
        print("file_name : " + str(file_name))

        data_file = open(directory + level_dir + file_name, 'r')
        try:
            # strip the surrounding brackets of the single-line list
            line = data_file.readlines()[0][1:][:-1]
        finally:
            data_file.close()
        # cut with the first "depth" recordIDs
        rec_list = line.split()[:depth]

        # look up the corresponding value in the attribute file; records
        # without an attribute score default to 0.0
        for rec in rec_list:
            recID = int(rec)
            if recID in attribute_dict:
                print(str(recID) + " -> " + str(attribute_dict[recID]))
                data_dict[recID] = float(attribute_dict[recID])
            else:
                print(str(recID) + " -> " + "0.0")
                data_dict[recID] = 0.0

    for key, value in data_dict.items():
        print(str(key) + "-->>" + str(value))

    # Create the value_frequence_table
    value_frequence_table = dict_2_value_frequence_table(data_dict)

    return value_frequence_table, data_dict


def get_bibrec_dict_with_method_score_from_file(recID_file_name, data_file_name):
    """
    Get all recordIDs and corresponding values.
    Note that this function was used to test with data from Ludmila and Martin
    Input:
        - recID_file_name : level file name, one record ID per line
        - data_file_name : scores file name, one "<recID> <value>" pair
          (whitespace separated) per line
    Output:
        - bibrec_dict (dictionary) : all pairs (recordID - value); records
          listed only in the recID file keep the default score 0.0
    """
    bibrec_dict = {}

    # Seed every known record ID with a default score of 0.0.
    recID_file = open(recID_file_name, 'r')
    try:
        for line in recID_file:
            recID = int(line)
            bibrec_dict.setdefault(recID, 0.0)
            print("recID = " + str(recID))
    finally:
        recID_file.close()

    # Overwrite the default with the real score where one is available.
    data_file = open(data_file_name, 'r')
    try:
        for line in data_file:
            splited_line = line.split()
            recID = int(splited_line[0])
            value = int(splited_line[1])
            bibrec_dict[recID] = value
            print("recID = " + str(recID) + " ----> " + "value = " + str(value))
    finally:
        data_file.close()
    print("It's finished !!!")
    return bibrec_dict

def create_LUT_from_file(value_frequence_table, rank_method_code, nb_elements_LUT, is_rescaling):
    """
    Build a raw lookup table from a precomputed value/frequency table and
    derive an approximated LUT of nb_elements_LUT entries from it (same
    contract as create_LUT, but fed from a frequency table instead of the
    database).
    Input:
        - value_frequence_table : all pairs (value - frequence)
        - rank_method_code : name of the ranking method
        - nb_elements_LUT : number of elements of the approximated LUT
        - is_rescaling : True -> rescale LUTs into [0, 1], False -> keep as is
    Output:
        - raw_LUT : original LUT (cumulative relative frequencies)
        - appro_LUT : approximated LUT sampled on a regular grid
        - both LUTs are also saved into the database
    """
    total = sum(value_frequence_table.values())

    # Cumulative distribution: raw_LUT[v] = fraction of records with value <= v.
    raw_LUT = {}
    running = 0.0
    for score in sorted(value_frequence_table.keys()):
        running = running + value_frequence_table[score]
        raw_LUT[score] = float(running) / float(total)

#    raw_LUT.setdefault(0,float(0))

    if is_rescaling:
        raw_LUT = rescaling(raw_LUT)

    # save LUT into database (type 1 = raw LUT)
    into_db_dRank(1, raw_LUT, str(rank_method_code))

    # Approximated LUT: sample the raw LUT at nb_elements_LUT evenly
    # spaced points between the smallest and the largest key.
    lo = min(raw_LUT.keys())
    hi = max(raw_LUT.keys())
    step = (float(hi) - float(lo)) / (float(nb_elements_LUT) - 1)

    appro_LUT = {lo: raw_LUT[lo]}
    sample = lo
    for _ in range(1, nb_elements_LUT):
        sample = float(sample) + float(step)
        appro_LUT[sample] = normalize_using_LUT(1, rank_method_code, [sample])[sample]

    if is_rescaling:
        appro_LUT = rescaling(appro_LUT)

    # save appro_LUT into database (type 2 = approximated LUT)
    into_db_dRank(2, appro_LUT, str(rank_method_code))

    return raw_LUT, appro_LUT


def normalize_data_from_file(recID_file_name, data_file_name):
    """
    Normalize all data from file
    Input:
        - recID_file_name : file that contains all recordIDs
        - data_file_name : file that contains score attributes
    Output:
        - normalized_data : dictionary of recID -> normalized score
    """
    data_dict = get_bibrec_dict_with_method_score_from_file(recID_file_name, data_file_name)

    normalized_data = {}
    for k, v in data_dict.items():
        # BUGFIX: normalize_using_LUT takes (LUT_type, rank_method_code,
        # values_list) and returns a dictionary; the old call passed only
        # two arguments and raised a TypeError.  Use the raw LUT (type 1)
        # of the "d_rank" method, as calculate_epsilon() does.
        normalized_value = normalize_using_LUT(1, "d_rank", [v])[v]
        normalized_data.setdefault(k, normalized_value)

    for k, v in normalized_data.items():
        if v != 0.0:
            print(str(k) + " : " + str(v))

    # save normalized data into database
    #into_db(normalized_data, rank_method_code)

    return normalized_data


def get_id_rnkMETHOD(rank_method_code):
    """
    Return the id of the rnkMETHOD row whose name is rank_method_code.
    Input:
        - rank_method_code : name of the ranking method
    Output:
        - id_rnkMETHOD as a string
    Raises:
        - ValueError when the method name is unknown (the old code raised
          an obscure UnboundLocalError in that case)
    """
    # Use a bound parameter instead of string interpolation to avoid SQL
    # injection and quoting problems.
    result = run_sql("SELECT id FROM rnkMETHOD WHERE name = %s", (rank_method_code,))
    if result and result[0] and result[0][0]:
        return str(result[0][0])
    raise ValueError("unknown rank method: %s" % rank_method_code)

def get_nb_zero_values(rank_method_code):
    """
    Count the records whose score for the given method is zero, based on
    rnkMETHODDATA and on the bibrec table.
    Input:
        - rank_method_code
    Output:
        - number of zero-values : records with an explicit 0 score plus
          bibrec records that have no score at all
    """
    nb_zero_values = 0

    id_rnkMETHOD = get_id_rnkMETHOD(rank_method_code)

    result = run_sql("SELECT relevance_data FROM rnkMETHODDATA WHERE id_rnkMETHOD = %s", (id_rnkMETHOD,))

    data_dict = {}
    if result and result[0] and result[0][0]:
        data_dict = deserialize_via_marshal(result[0][0])

        # records carrying an explicit zero score
        for value in data_dict.values():
            if value == 0:
                nb_zero_values = nb_zero_values + 1

    # records present in bibrec but absent from the method data count as
    # zero-valued as well
    result = run_sql("SELECT id FROM bibrec")
    for row in result:
        if row[0] not in data_dict:
            nb_zero_values = nb_zero_values + 1

    return nb_zero_values


def get_all_bibrec_scores_fromDB(rank_method_code):
    """
    Get all of recID exist in the database and their values for each method
    Input:
        - rank_method_code
    Output:
        - A dictionary of recID and their non-value
        - A list of recIDs has zero-value
    """

    id_rnkMETHOD = get_id_rnkMETHOD(rank_method_code)

    result = run_sql("SELECT * FROM rnkMETHODDATA WHERE id_rnkMETHOD = %s", (id_rnkMETHOD,))
    if result and result[0] and result[0][0]:
        pass
    else:
        print "id_rnkMETHOD :" + str(id_rnkMETHOD) + " not found! "
        return

    bibrec_dict = {}
    bibrec_list = []

    # update the true value for some items in the dict
    result = run_sql("SELECT relevance_data FROM rnkMETHODDATA WHERE id_rnkMETHOD = %s", (id_rnkMETHOD,))

    data_dict = {}
    if result and result[0] and result[0][0]:
        data_dict = deserialize_via_marshal(result[0][0])

        print "bibrec_dict : "
        for (key, value) in data_dict.iteritems():
            # do not accept the zero-value in bibrec_dict
            if value != 0:
                bibrec_dict[key] = value
                print key, "----->", value

    # Read data from bibrec table and set values temporarily = 0
    result = run_sql("SELECT id FROM bibrec")
    for i in range(0, len(result)):
        id = result[i][0]
        if not bibrec_dict.has_key(id):
                bibrec_list.append(id)

    return bibrec_dict, bibrec_list

def get_interpolated_value(x, x1, x2, y1, y2):
    """
    Linearly interpolate the ordinate of x between the two points
    (x1, y1) and (x2, y2), assuming x1 < x < x2.
    Input:
        - x : abscissa to interpolate at
        - (x1, y1) : first interpolation point
        - (x2, y2) : second interpolation point
    Output:
        - interpolated ordinate y
    """
    rise = float(y2) - float(y1)
    run = float(x2) - float(x1)
    offset = float(x) - float(x1)
    return float(y1) + (rise * offset) / run

def rescaling(dictionary):
    """
    Rescale both the keys and the values of a dictionary into [0, 1].
    Input:
        - dictionary : numeric keys mapped to numeric values
    Output:
        - a new dictionary with keys and values linearly rescaled to [0, 1]
          (the input dictionary is no longer modified in place)
        - a degenerate axis (all keys equal, or all values equal) maps to
          0.0 instead of raising ZeroDivisionError as the old code did
    """
    value_min = min(dictionary.values())
    value_max = max(dictionary.values())
    value_span = float(value_max) - float(value_min)

    key_min = min(dictionary.keys())
    key_max = max(dictionary.keys())
    key_span = float(key_max) - float(key_min)

    rescaled = {}
    for key, value in dictionary.items():
        if key_span:
            new_key = (float(key) - float(key_min)) / float(key_span)
        else:
            new_key = 0.0
        if value_span:
            new_value = (float(value) - float(value_min)) / float(value_span)
        else:
            new_value = 0.0
        rescaled[new_key] = new_value

    return rescaled

def create_LUT(rank_method_code, nb_elements_LUT, is_rescaling):
    """
    Create a lookup table from the database for a ranking method and store
    it in the rnkMETHODdRank table.
    Input:
        - rank_method_code
        - nb_elements_LUT : number of elements of the reduced LUT
        - is_rescaling (boolean) : True -> rescale LUTs into [0, 1]
    Output:
        - raw_LUT : cumulative distribution of the scores
        - reduced_LUT : raw_LUT sampled at nb_elements_LUT regular points
        - both LUTs are also saved into the database
    """
    id_rnkMETHOD = get_id_rnkMETHOD(rank_method_code)

    print("rank_method_code : " + str(rank_method_code))
    print("id_rnkMETHOD : " + str(id_rnkMETHOD))

    # bound parameter instead of string interpolation (SQL injection safety)
    result = run_sql("SELECT relevance_data FROM rnkMETHODDATA WHERE id_rnkMETHOD = %s", (id_rnkMETHOD,))
    if not (result and result[0] and result[0][0]):
        print("id_rnkMETHOD :" + str(id_rnkMETHOD) + " not found! ")
        return

    data_dict = deserialize_via_marshal(result[0][0])

    # frequency of each non-zero score; nb_records counts the scored records
    value_frequence_table = {}
    nb_records = 0
    for v in data_dict.values():
        if v != 0:
            nb_records = nb_records + 1
            value_frequence_table[v] = value_frequence_table.get(v, 0) + 1

    # add the zero_values
    nb_zero_values = get_nb_zero_values(rank_method_code)
    value_frequence_table.setdefault(0, nb_zero_values)
    nb_records = nb_records + nb_zero_values

    print("value_frequence_table")
    print(value_frequence_table)

    print(nb_records)

    # raw LUT = cumulative relative frequency, keys in increasing order
    raw_LUT = {}
    sum_acc = 0
    for k in sorted(value_frequence_table.keys()):
        sum_acc = sum_acc + value_frequence_table[k]
        raw_LUT[k] = float(sum_acc) / float(nb_records)

    # defensive: key 0 is normally present already (see setdefault above)
    raw_LUT.setdefault(0, float(0))

    if is_rescaling:
        raw_LUT = rescaling(raw_LUT)

    # save LUT into database
    into_db_dRank(1, raw_LUT, str(rank_method_code))

    # reduced_LUT: sample the raw LUT on a regular grid between the
    # smallest and the largest key
    value_min = min(raw_LUT.keys())
    value_max = max(raw_LUT.keys())

    reduced_LUT = {}
    reduced_LUT[value_min] = raw_LUT[value_min]
    print("reduced_LUT[value_min] = " + str(reduced_LUT[value_min]))
    gap = (float(value_max) - float(value_min)) / (float(nb_elements_LUT) - 1)
    value_previous = value_min
    for element_index in range(1, nb_elements_LUT):
        key = float(value_previous) + float(gap)
        value_previous = key
        reduced_LUT[key] = normalize_using_LUT(1, rank_method_code, [key])[key]
        print("reduced_LUT[key] = " + str(reduced_LUT[key]))

    if is_rescaling:
        reduced_LUT = rescaling(reduced_LUT)

    # save reduced_LUT into database
    into_db_dRank(2, reduced_LUT, str(rank_method_code))

    return raw_LUT, reduced_LUT

def normalize_using_LUT(LUT_type, rank_method_code, values_list):
    """
    Normalize a list of values using the lookup table and interpolation.
    Input:
        - LUT_type : raw_LUT (1), approximated LUT (2)
        - rank_method_code
        - values_list : list of input values (consumed by this function)
    Output:
        - dictionary mapping each input value to its normalized value
    """
    id_rnkMETHOD = get_id_rnkMETHOD(rank_method_code)

    # bound parameters instead of string interpolation
    if LUT_type == 1:
        result = run_sql("SELECT raw_LUT FROM rnkMETHODdRank WHERE id_rnkMETHOD = %s", (id_rnkMETHOD,))
    elif LUT_type == 2:
        result = run_sql("SELECT appro_LUT FROM rnkMETHODdRank WHERE id_rnkMETHOD = %s", (id_rnkMETHOD,))

    if result and result[0] and result[0][0]:
        lut = deserialize_via_marshal(result[0][0])

    sorted_keys = sorted(lut.keys())
    normalized_dict = {}

    while values_list:
        value = values_list.pop()
        if value < sorted_keys[0]:
            # BUGFIX: the old code read "sorted[0]" (subscripting the
            # builtin), which raised a TypeError; clamp to the smallest
            # LUT entry instead.
            normalized_value = lut[sorted_keys[0]]
        elif value in lut:
            normalized_value = lut[value]
        elif value > sorted_keys[-1]:
            # above the largest key the LUT saturates at 1
            normalized_value = 1
        else:
            for k in range(len(sorted_keys) - 1):
                if (sorted_keys[k] < value) and (sorted_keys[k+1] > value):
                    normalized_value = get_interpolated_value(value, sorted_keys[k], sorted_keys[k+1], lut[sorted_keys[k]], lut[sorted_keys[k+1]])
                    break
        normalized_dict.setdefault(value, normalized_value)

    return normalized_dict


def normalize_db(LUT_type, rank_method_code):
    """
    Normalize all data of one ranking method stored in the database.
    Input:
        - LUT_type : raw_LUT (1), reduced_LUT (2)
        - rank_method_code
    Output:
        - normalized data (recID -> normalized score)
        - the normalized data is also saved into the database
    Note: the old dead read of rnkMETHODdRankLUT (the "lut" variable was
    never used) has been removed.
    """
    data_dict, data_list = get_all_bibrec_scores_fromDB(rank_method_code)

    # normalize every stored score in one pass; list() keeps the values
    # poppable by normalize_using_LUT regardless of the dict view type
    normalized_data = {}
    normalized_data_tmp = normalize_using_LUT(LUT_type, rank_method_code, list(data_dict.values()))
    for key, value in data_dict.items():
        normalized_data[key] = normalized_data_tmp[value]

    # assign the normalized zero-value to the records without a score
    normalized_zero_value = normalize_using_LUT(LUT_type, rank_method_code, [0])[0]
    while data_list:
        recID = data_list.pop()
        normalized_data[recID] = normalized_zero_value

    # save normalized data into database
    into_db(normalized_data, rank_method_code)

    return normalized_data

def merge_2dictionaries(dictionary_1, dictionary_2):
    """
    Merge dictionary_1 into dictionary_2 (in place).
    Input:
        - dictionary_1
        - dictionary_2
    Output:
        - dictionary_2, completed with the entries of dictionary_1 whose
          keys it did not already contain (existing entries win)
    """
    for k, v in dictionary_1.items():
        if k not in dictionary_2:
            dictionary_2[k] = v
    print(dictionary_2)
    return dictionary_2

def separe_into_2dictionaries(dictionary):
    """
    Split a dictionary in two according to its values.
    Input:
        - dictionary
    Output:
        - dictionary_1 : the entries whose value is zero
        - dictionary_2 : the entries whose value is not zero
    """
    zero_entries = {}
    nonzero_entries = {}

    for key in dictionary:
        if dictionary[key] == 0:
            zero_entries[key] = dictionary[key]
        else:
            nonzero_entries[key] = dictionary[key]

    return zero_entries, nonzero_entries

def create_inverse_LUT(rank_method_code, LUT_type):
    """
    Invert a Lookup Table (swap its keys and values).
    Input:
        - rank_method_code
        - LUT_type : raw_LUT (1), approximated LUT (2)
    Output:
        - inverse_LUT : value -> key mapping of the selected LUT
    """
    id_rnkMETHOD = get_id_rnkMETHOD(rank_method_code)

    if LUT_type == 1:
        result = run_sql("SELECT raw_LUT FROM rnkMETHODdRank WHERE id_rnkMETHOD = %s", (id_rnkMETHOD,))
    elif LUT_type == 2:
        # CONSISTENCY FIX: the rest of this module stores and reads the
        # approximated LUT in the appro_LUT column (see into_db_dRank and
        # normalize_using_LUT); "reduced_LUT" is not used anywhere else.
        result = run_sql("SELECT appro_LUT FROM rnkMETHODdRank WHERE id_rnkMETHOD = %s", (id_rnkMETHOD,))

    if result and result[0] and result[0][0]:
        lut = deserialize_via_marshal(result[0][0])

    inverse_LUT = {}
    for key, value in lut.items():
        inverse_LUT[value] = key

    return inverse_LUT

def use_inverse_LUT(inverse_LUT, value):
    """
    Look a value up in an inverse LUT, interpolating between the two
    surrounding entries when the value is not an exact key.
    Input:
        - inverse_LUT : dictionary as returned by create_inverse_LUT
        - value : value to map back through the inverse LUT
    Output:
        - the (possibly interpolated) inverse-LUT entry for value; values
          outside the key range are clamped to the first/last entry
    BUGFIX: the old body was a copy of normalize_using_LUT that referenced
    the undefined names values_list and lut, so it always raised a
    NameError; it now actually uses its own parameters.
    """
    sorted_keys = sorted(inverse_LUT.keys())

    if value in inverse_LUT:
        return inverse_LUT[value]
    if value < sorted_keys[0]:
        return inverse_LUT[sorted_keys[0]]
    if value > sorted_keys[-1]:
        return inverse_LUT[sorted_keys[-1]]
    for k in range(len(sorted_keys) - 1):
        if sorted_keys[k] < value < sorted_keys[k + 1]:
            return get_interpolated_value(value, sorted_keys[k], sorted_keys[k + 1],
                                          inverse_LUT[sorted_keys[k]], inverse_LUT[sorted_keys[k + 1]])


def del_rank_method_data(rank_method_code):
    """
    Delete the data for a rank method from the rnkMETHODdRank table.
    Input:
        - rank_method_code
    Output:
        - none (the matching row is removed from the database)
    """
    id_rnkMETHOD = get_id_rnkMETHOD(rank_method_code)

    # bound parameter instead of string interpolation (SQL injection safety)
    run_sql("DELETE FROM rnkMETHODdRank WHERE id_rnkMETHOD = %s", (id_rnkMETHOD,))

def into_db(dictionary, rank_method_code):
    """
    Writes into the rnkMETHODdRank table the ranking results
    Input:
        - dictionary : recID -> score mapping to persist
        - rank_method_code : name of the ranking method
    Output:
        - none (the rnkMETHODdRank row is inserted or updated, and its
          data_last_updated timestamp refreshed)
    """
    id_rnkMETHOD = get_id_rnkMETHOD(rank_method_code)

#    del_rank_method_data(rank_method_code)
    # serialize the dictionary into the blob format used by the rnk tables
    serialized_data = serialize_via_marshal(dictionary)

    # update the existing row if there is one, otherwise insert a new one
    result = run_sql("SELECT * FROM rnkMETHODdRank WHERE id_rnkMETHOD = %s", (id_rnkMETHOD,))
    if result and result[0] and result[0][0]:
        # Update data
        run_sql("UPDATE rnkMETHODdRank SET relevance_data = %s WHERE id_rnkMETHOD = %s", (serialized_data,id_rnkMETHOD))
    else:
        # Insert new row
        run_sql("INSERT INTO rnkMETHODdRank(id_rnkMETHOD, relevance_data) VALUES(%s,%s)", (id_rnkMETHOD, serialized_data))

    # record when the relevance data was last written
    date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    run_sql("UPDATE rnkMETHODdRank SET data_last_updated = %s WHERE id_rnkMETHOD=%s" ,(date, id_rnkMETHOD))
#    write_message("Finished writing the ranks into rnkMETHOD table", verbose=5)

#def del_LUT(rank_method_code):
#    """
#    Delete the LUT data on rnkMETHODdRank
#    Input:
#        - rank_method_code
#    Output:
#    """
#    id_rnkMETHOD = get_id_rnkMETHOD(rank_method_code)
#
#    sql_string = "DELETE FROM rnkMETHODdRank WHERE id_rnkMETHOD = '%s'" %id_rnkMETHOD
#    run_sql(sql_string)

def into_db_dRank(data_type, dictionary, rank_method_code):
    """
    Writes a serialized dictionary into one column of the rnkMETHODdRank
    table, selected by data_type.
    Input:
        - data_type: relevance_data (0), raw_LUT (1), appro_LUT (2)
        - dictionary : data to persist
        - rank_method_code : name of the ranking method
    Output:
        - none (the row is inserted or updated, and the matching
          *_last_updated timestamp refreshed)
    """
    id_rnkMETHOD = get_id_rnkMETHOD(rank_method_code)
    serialized_data = serialize_via_marshal(dictionary)

    # update the existing row if there is one, otherwise insert a new one;
    # each data_type targets a different column of the same row
    result = run_sql("SELECT * FROM rnkMETHODdRank WHERE id_rnkMETHOD = %s", (id_rnkMETHOD,))
    if result and result[0] and result[0][0]:
        if data_type == 0:
            # update data
            run_sql("UPDATE rnkMETHODdRank SET relevance_data = %s WHERE id_rnkMETHOD = %s", (serialized_data,id_rnkMETHOD))
        elif data_type == 1:
            run_sql("UPDATE rnkMETHODdRank SET raw_LUT = %s WHERE id_rnkMETHOD = %s", (serialized_data,id_rnkMETHOD))
        else:
            run_sql("UPDATE rnkMETHODdRank SET appro_LUT = %s WHERE id_rnkMETHOD = %s", (serialized_data,id_rnkMETHOD))
    else:
        if data_type == 0:
            # insert new row
            run_sql("INSERT INTO rnkMETHODdRank(id_rnkMETHOD, relevance_data) VALUES(%s,%s)", (id_rnkMETHOD, serialized_data))
        elif data_type == 1:
            run_sql("INSERT INTO rnkMETHODdRank(id_rnkMETHOD, raw_LUT) VALUES(%s,%s)", (id_rnkMETHOD, serialized_data))
        else:
            run_sql("INSERT INTO rnkMETHODdRank(id_rnkMETHOD, appro_LUT) VALUES(%s,%s)", (id_rnkMETHOD, serialized_data))

    # refresh the timestamp matching the kind of data written
    date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    if data_type == 0:
        run_sql("UPDATE rnkMETHODdRank SET data_last_updated = %s WHERE id_rnkMETHOD=%s" ,(date, id_rnkMETHOD))
    else:
        run_sql("UPDATE rnkMETHODdRank SET LUT_last_updated = %s WHERE id_rnkMETHOD=%s" ,(date, id_rnkMETHOD))
#    write_message("Finished writing the ranks into rnkMETHOD table", verbose=5)

def show_compressed_data(rank_method_code, field, table_name):
    """
    Print the compressed (marshalled) data stored in the database.
    Input:
        - rank_method_code
        - field : name of the column to read (e.g. raw_LUT, appro_LUT)
        - table_name : name of the table to read from
    Output:
        - none (the deserialized data is printed to the terminal)
    """
    id_rnkMETHOD = get_id_rnkMETHOD(rank_method_code)

    # BUGFIX: column and table names cannot be passed as bound parameters
    # (the driver would quote them as string literals); interpolate these
    # two code-supplied identifiers and bind only the id value.
    result = run_sql("SELECT %s FROM %s WHERE id_rnkMETHOD = %%s" % (field, table_name), (id_rnkMETHOD,))
    data_dict = {}
    if result and result[0] and result[0][0]:
        data_dict = deserialize_via_marshal(result[0][0])
    print("LUT data for method : " + str(rank_method_code))
    for k in sorted(data_dict.keys()):
        print(str(k) + " --->>> " + str(data_dict[k]))

def _strip_trailing_zero(number):
    """Return number as a string without a trailing '.0' (shortens the chart URL)."""
    text = str(number)
    if text.endswith(".0"):
        return text[:-2]
    return text

def extract_parameters(dictionary):
    """
    Extract the data needed to draw one Google-chart curve from a dictionary.
    Input:
        - dictionary : numeric keys mapped to numeric values
    Output:
        - keys : comma-separated string of the sorted keys, rounded to 3
          decimals, with any trailing ".0" stripped
        - values : comma-separated string of the corresponding values,
          rounded to 4 decimals, with any trailing ".0" stripped
        - x_max : max of the keys
        - y_max : max of the values
    """
    key_parts = []
    value_parts = []
    for k in sorted(dictionary.keys()):
        key_parts.append(_strip_trailing_zero(round(k, 3)))
        value_parts.append(_strip_trailing_zero(round(dictionary[k], 4)))

    # join() replaces the old quadratic string concatenation and makes the
    # leading-comma trimming unnecessary
    keys = ",".join(key_parts)
    values = ",".join(value_parts)

    x_max = max(dictionary.keys())
    y_max = max(dictionary.values())

    return keys, values, x_max, y_max

def split_data_dict(data_dict, test_percentage):
    """
    Randomly split a data dictionary into a test and a training subset.
    Input:
        - data_dict : dictionary to split (keys must be convertible to int)
        - test_percentage : fraction (0..1) of the entries for the test set
    Output:
        - test_dict : about test_percentage of data_dict, chosen at random
        - training_dict : the remaining entries of data_dict
    """
    # list() makes the key pool poppable whatever type dict.keys() returns
    key_pool = list(data_dict.keys())
    nb_test = int(len(key_pool) * test_percentage)

    test_dict = {}
    training_dict = {}

    # draw nb_test keys at random for the test subset
    for _ in range(nb_test):
        key_random = int(key_pool.pop(random.randrange(len(key_pool))))
        test_dict[key_random] = data_dict[key_random]

    # everything left goes to the training subset
    while key_pool:
        key_random = int(key_pool.pop(random.randrange(len(key_pool))))
        training_dict[key_random] = data_dict[key_random]

    return test_dict, training_dict

def calculate_epsilon(LUT, test_LUT, training_dict):
    """
    Calculate the difference (error) between the original LUT and a test
    LUT by normalizing every value of the training dictionary with both.
    Input:
        - LUT : original LUT (not used directly: the "d_rank" raw LUT is
          read from the database by normalize_using_LUT)
        - test_LUT : LUT created from the test dictionary (approx. 80%)
        - training_dict : training dictionary (approx. 20%)
    Output:
        - epsilon : RMS-style error between the two normalizations
    """
    epsilon = 0.0

    # NOTE(review): the 0.01 factor makes the result 10x the usual RMS
    # error (sqrt(sum / (0.01 * N))); kept as-is to preserve behavior.
    training_size = float(len(training_dict)) * 0.01

    for value in training_dict.values():
        value_from_LUT = float(normalize_using_LUT(1, "d_rank", [value])[value])
        value_from_test_LUT = float(normalize_using_LUT(2, "test", [value])[value])

        print("value_from_LUT      = " + str(value_from_LUT))
        print("value_from_test_LUT = " + str(value_from_test_LUT))

        epsilon = epsilon + pow(value_from_LUT - value_from_test_LUT, 2)

        print("epsilon = " + str(epsilon))

    epsilon = math.sqrt(float(epsilon) / float(training_size))

    return epsilon

def detect_optimal_size_appro_LUT(dictionary, delta_epsilon):
    """
    Detect automatically the optimal size of the approximated LUT.
    Input:
        - dictionary : epsilon dictionary; assumed to be keyed by
          consecutive integer sizes starting at 1 (the loop indexes
          dictionary[key +/- i] directly) -- TODO confirm with callers
        - delta_epsilon : threshold on the averaged epsilon difference
    Output:
        - the first size whose neighbourhood (nb_neighbours points on each
          side) varies by less than delta_epsilon on average, or -1 when
          the dictionary is too small or no such size exists
    """
    nb_neighbours = 5

    # need at least nb_neighbours points on each side of a candidate
    if len(dictionary) < 2 * nb_neighbours + 1:
        return -1

    # otherwise, find the optimal size of the approximated LUT
    for key in range(nb_neighbours + 2, len(dictionary) - nb_neighbours):
        # average the epsilon differences across the neighbourhood
        diff_sum = 0.0
        for i in range(1, nb_neighbours + 1):
            diff_sum = diff_sum + float(dictionary[key + i]) - float(dictionary[key - i])

        delta = abs(diff_sum / float(nb_neighbours))

        print("delta : " + str(delta))

        if delta < delta_epsilon:
            return key
    return -1

def create_charts(dict1, dict2, label1, label2, title):
    """
    Build the Google-chart URL for two curves and write it to chart.txt.
    Input:
        - dict1 : dictionary1 (curve1)
        - dict2 : dictionary2 (curve2)
        - label1 : label for curve1
        - label2 : label for curve2
        - title : title for chart
    Output:
        - none (an <img> tag holding the chart URL is written to chart.txt)
    """
    keys1, values1, x_max1, y_max1 = extract_parameters(dict1)
    keys2, values2, x_max2, y_max2 = extract_parameters(dict2)

    # common axis ranges covering both curves
    x_max = max(x_max1, x_max2)
    y_max = max(y_max1, y_max2)

    chart = (
        "<img src='http://chart.apis.google.com/chart?"
        "cht=lxy&"
        "chxt=x,y,r,t&"
        "chg=10,10&"
        "chco=FF0000,0000FF&"
        + "chxr=0,0," + str(x_max) + "|1,0," + str(y_max) + "|2,0," + str(y_max) + "|3,0," + str(x_max) + "&"
        + "chds=0," + str(x_max) + ",0," + str(y_max) + ",0," + str(x_max) + ",0," + str(y_max) + "&"
        + "chtt=" + str(title) + "&"
        + "chts=0000FF,20&"
        + "chdl=" + str(label1) + "|" + str(label2) + "&"
        + "chdlp=b&"
        + "chs=600x500&"
        + "chd=t:"
        # curve 1: x coordinates | y coordinates
        + keys1 + "|" + values1
        + "|"
        # curve 2: x coordinates | y coordinates
        + keys2 + "|" + values2
        + "'>"
    )

    # shadowing of the builtin "file" removed; the handle is now closed
    # even if the write fails
    chart_file = open('chart.txt', 'w')
    try:
        chart_file.write(chart)
    finally:
        chart_file.close()