# -*- coding: utf-8 -*-

import numpy as np
import operator
from sklearn.metrics import accuracy_score

def show_compare_results(Y_true_label, Y_prediction_label):
    """Print per-category and overall classification accuracy.

    Parameters
    ----------
    Y_true_label : array-like
        Ground-truth labels.
    Y_prediction_label : array-like
        Predicted labels, element-aligned with ``Y_true_label``.

    Prints one line per label (sorted by per-label accuracy, descending,
    each with its sample count) followed by the overall accuracy.
    Returns ``None``.
    """
    Y_true_label = np.asarray(Y_true_label)
    Y_prediction_label = np.asarray(Y_prediction_label)

    dict_label_accuracy = {}
    for label in np.unique(Y_true_label):
        # Boolean mask selecting all samples whose true class is `label`.
        mask = Y_true_label == label
        # Within the mask every true label equals `label`, so per-class
        # accuracy is simply the fraction of predictions equal to it.
        accuracy = float((Y_prediction_label[mask] == label).mean())
        dict_label_accuracy[label] = {
            'Accuracy': accuracy,
            'Num': int(mask.sum()),
        }

    # Report the best-predicted categories first.
    sorted_dict = sorted(dict_label_accuracy.items(),
                         key=lambda kv: kv[1]['Accuracy'], reverse=True)
    Whole_Accuracy = float((Y_prediction_label == Y_true_label).mean()) * 100

    print('\n')
    for label, info in sorted_dict:
        print('%-30s, Num: %-5d, Accuracy: %10.2f %%'
              % (label, info['Num'], info['Accuracy'] * 100))
    print('\n')
    # Fixed typo in the original message ("Totel" -> "Total").
    print('Total accuracy: %.2f %%' % Whole_Accuracy)

if __name__ == "__main__":
    # Demo run: compare a tiny set of ground-truth labels against
    # predictions and print the per-class / overall accuracy report.
    #
    # truth   prediction   match?
    #   a        a          yes
    #   b        c          no
    #   c        c          yes
    #   a        a          yes
    ground_truth = ['a', 'b', 'c', 'a']
    predicted = ['a', 'c', 'c', 'a']
    show_compare_results(ground_truth, predicted)
