#!/usr/bin/env python

import sys
from nlu import NLU
from db import DB
from glob import glob
from os.path import basename

# Module-level singletons: the database and the NLU parser under test.
db = DB()
nlu = NLU(db=db)

# Directory of test fixtures: one file per action category.  The first
# line of each file is a comma-separated list of action names treated as
# equivalent to that category; the remaining lines are test utterances.
UTTERANCES_DIR = 'utterances/'

# Map each equivalent action name -> canonical category name (the
# fixture file's basename).  A parse that yields no action (None) maps
# to None so lookups never KeyError on unparsed utterances.
equiv_action = {None: None}
for f in glob(UTTERANCES_DIR + '*'):
    bf = basename(f)
    # 'with' closes the handle promptly; the original open(f).readline()
    # leaked the file object until garbage collection.
    with open(f) as f_in:
        equivs = f_in.readline().strip().split(',')
    for equiv in equivs:
        equiv_action[equiv] = bf

def test_utterances(utterances):
    """Parse each utterance and tally the canonical action guessed.

    utterances -- iterable of raw utterance strings.
    Returns a dict mapping canonical action name (or None when the
    parser produced no recognized action) to the count of utterances
    classified as that action.
    """
    results = {}
    for utterance in utterances:
        frame = nlu.parse_user_response(utterance)
        # Normalize the parser's action through the equivalence table so
        # synonymous action names count toward the same category.
        action = equiv_action[frame.action]
        # dict.get replaces the deprecated dict.has_key() (removed in
        # Python 3) and needs only one lookup.
        results[action] = results.get(action, 0) + 1
    return results

def test_all_utterances():
    confu = {}
    for f in glob(UTTERANCES_DIR+'*'):
        bf = basename(f)
        f_in = open(f)
        f_in.readline() # skip first line; dirty hack for now
        confu[bf] = test_utterances(f_in.readlines())
    
    # output confusion matrix:
    keys = confu.keys()
    sys.stdout.write("          ")
    for key in keys:
        sys.stdout.write("%10s" % key)
    sys.stdout.write('      None\n')
    for cat in keys:
        sys.stdout.write("%-10s" % cat)
        results = confu[cat]
        for guess in keys+[None]:
            if not results.has_key(guess):
                results[guess] = 0
            sys.stdout.write("%10d" % results[guess])
        sys.stdout.write('\n')
    
    # output overall accuracy
    correct = total = 0
    for cat in keys:
        for guess in confu[cat]:
            if cat == guess: correct += confu[cat][guess]
            total += confu[cat][guess]
    print "\noverall accuracy: %.2f" % ( float(correct) / total )
    
    # output precision, recall, and f1 for each category
    for cat in keys:
        print "\n", cat
        p_den = sum(map(lambda x: confu[x][cat], confu))
        if p_den > 0:
            precision = float(confu[cat][cat]) / p_den
        else:
            precision = 0
        r_den = sum(map(lambda x: confu[cat][x], confu))
        if r_den > 0:
            recall = float(confu[cat][cat]) / r_den
        else:
            recall = 0
        if precision + recall == 0:
            fscore = 0
        else:
            fscore = 2 * precision * recall / (precision + recall)
        print "precision: %.2f" % precision
        print "recall: %.2f" % recall
        print "f-score: %.2f" % fscore

# Entry point: run the full evaluation suite when executed as a script.
if __name__ == '__main__':
    test_all_utterances()
