#!/usr/bin/env python
import nltk
import csv
import sys
from math import log

NUM_DOCS = 3000        # cap on how many CSV rows (documents) are processed (see __main__ slice)
NUM_WORDS = 1000       # how many top-PMI tokens are written out
TOKEN_FILE = 'tokens.txt'  # output path for the ranked token:pmi lines

def get_data(filename):
    """Read a CSV file and return its rows as a list of dicts.

    Each row is keyed by the CSV header (csv.DictReader), so callers can
    access fields such as row['BodyMarkdown'] and row['OpenStatus'].

    :param filename: path to a comma-delimited, double-quote-quoted CSV file
    :return: list of dicts, one per data row
    """
    # print-as-function: identical output on Python 2 (single parenthesized
    # expression) and Python 3.
    print('Getting data from %s' % filename)
    with open(filename, 'r') as csvfile:
        # list() instead of a pass-through comprehension; reader options are
        # the csv defaults, kept explicit to match the rest of the script.
        return list(csv.DictReader(csvfile, delimiter=',', quotechar='"'))
def get_counts(data):
    """Tokenize every document and build per-token 2x2 contingency counts.

    Mutates each row in *data* by adding a 'tokens' key (lowercased nltk
    word tokens of row['BodyMarkdown']).

    :param data: list of row dicts with 'BodyMarkdown' and 'OpenStatus' keys
    :return: dict mapping token -> [[N00, N01], [N10, N11]] where the first
             index is token presence in a document (0=absent, 1=present) and
             the second is document status (1 iff OpenStatus == 'open').
    """
    all_tokens = []
    for i, row in enumerate(data):
        print('Tokenizing %d of %d documents' % (i, len(data)))
        row['tokens'] = nltk.word_tokenize(row['BodyMarkdown'].lower())
        all_tokens.extend(row['tokens'])
    # Only the vocabulary (unique tokens) is needed, so a plain set replaces
    # the FreqDist whose counts were never used.
    counts = {t: [[0, 0], [0, 0]] for t in set(all_tokens)}
    for i, row in enumerate(data):
        print('Counting %d of %d documents' % (i, len(data)))
        j = int(row['OpenStatus'] == 'open')
        # Hoist a set per document: O(1) membership instead of scanning the
        # token list once per vocabulary entry. 'present' also avoids the
        # original's shadowing of the loop index i.
        present = set(row['tokens'])
        for t in counts:
            counts[t][int(t in present)][j] += 1
    return counts
def get_pmi(counts):
    """Score each token by mutual information with the 'open' status.

    Math taken from:
    http://nlp.stanford.edu/IR-book/html/htmledition/mutual-information-1.html
    (MI = sum over the four presence/status cells of
    p(u,v) * log2(p(u,v) / (p(u)*p(v))), estimated from raw counts.)

    :param counts: dict token -> [[N00, N01], [N10, N11]] as built by
                   get_counts (first index: presence, second: open status)
    :return: dict token -> mutual-information score; tokens with fewer than
             3 total occurrences or any empty cell are omitted.
    """
    token_pmi = {}
    for i, (t, c) in enumerate(counts.items()):
        # Message fixed: this loop walks tokens, not documents.
        print('Getting PMI for %d of %d tokens' % (i, len(counts)))
        c_all = float(c[0][0] + c[0][1] + c[1][0] + c[1][1])
        # Ignore token if there are fewer than 3 in total or if any cell is
        # empty (a zero cell would send the log term to -infinity).
        if c_all < 3 or c[0][0] * c[0][1] * c[1][0] * c[1][1] == 0:
            continue
        # Marginals: rows[u] = total with presence u, cols[v] = total with status v.
        rows = (float(c[0][0] + c[0][1]), float(c[1][0] + c[1][1]))
        cols = (float(c[0][0] + c[1][0]), float(c[0][1] + c[1][1]))
        token_pmi[t] = sum(
            (c[u][v] / c_all) * log((c_all * c[u][v]) / (rows[u] * cols[v]), 2)
            for u in (0, 1)
            for v in (0, 1)
        )
    return token_pmi
if __name__ == '__main__':
    # Pipeline: load at most NUM_DOCS rows from the CSV named on the command
    # line, build per-token contingency counts, score by mutual information,
    # then persist the NUM_WORDS best-scoring tokens as "token:pmi" lines.
    rows = get_data(sys.argv[1])[:NUM_DOCS]
    scores = get_pmi(get_counts(rows))
    ranked = sorted(scores.items(), key=lambda kv: kv[1], reverse=True)
    with open(TOKEN_FILE, 'w') as out:
        for token, score in ranked[:NUM_WORDS]:
            out.write('%s:%f\n' % (token, score))
