#!/usr/bin/env python
import nltk
import csv
import sys
import time
import calendar
from math import log
#from svmutil import *
from liblinearutil import *

# Input file of "token:pmi" lines produced by an earlier pass (see get_tokens).
TOKEN_FILE = 'tokens.txt'
# Output file for the per-post probability predictions written at the bottom.
DATA_FILE = 'data.csv'
# Per-class weights used to scale the raw model outputs before normalizing.
# NOTE(review): these look like empirical priors of each OpenStatus label in
# the training data (they sum to ~1.0) -- confirm against how they were computed.
LABELS = {'not a real question':0.0091347705760047096,
          'not constructive':0.0046458596397953083,
          'off topic':0.005200965546050945,
          'open':0.9791913907850639,
          'too localized':0.0018270134530850952}
DEBUG = True  # when True, print per-row progress messages

def get_tokens(filename):
    """Load the token list and token->PMI mapping from *filename*.

    Each line is expected to look like "token:pmi".  Returns a tuple
    (tokens, token_to_pmi) where tokens preserves file order and
    token_to_pmi maps token -> PMI value.  NOTE: the PMI values are kept
    as the raw strings read from the file, not converted to float.
    """
    with open(filename, 'r') as token_file:
        # rstrip('\n') instead of s[:-1]: the old slice chopped the last
        # character unconditionally, silently truncating the final line
        # whenever the file lacks a trailing newline.
        splits = [line.rstrip('\n').split(':') for line in token_file]
        token_to_pmi = {s[0]: s[1] for s in splits}
        tokens = [s[0] for s in splits]
        return tokens, token_to_pmi
def tokenize(data):
    for i,row in enumerate(data):
        if DEBUG:
            print 'Tokenizing %d of %d documents'%(i,len(data))
        row['tokens'] = nltk.word_tokenize(row['BodyMarkdown'].lower())
def get_data(filename):
    if DEBUG:
        print 'Getting data from %s'%filename
    with open(filename, 'rb') as csvfile:
        return [row for row in csv.DictReader(csvfile, delimiter=',', quotechar='"')]
def str_to_epoch(timestr):
    """Parse a 'MM/DD/YYYY HH:MM:SS' timestamp string into seconds since
    the Unix epoch (UTC, via calendar.timegm).

    Returns 0 for None or for any string that does not match the format,
    so downstream feature arithmetic never raises.
    """
    # Renamed the parameter: the original shadowed the builtin `str`.
    if timestr is None:  # `is None`, not `== None`
        return 0
    try:
        return calendar.timegm(time.strptime(timestr, '%m/%d/%Y %H:%M:%S'))
    except ValueError:
        # Malformed / unparseable timestamp -> treat as epoch 0.
        return 0
def get_features(row, tokens, token_to_pmi):
    """Build the numeric feature vector for one row.

    Features: (account age in seconds at post time, body length, title
    length).  *tokens* / *token_to_pmi* are accepted but currently unused
    (a token-indicator feature set was tried and abandoned).
    """
    account_age = (str_to_epoch(row['PostCreationDate'])
                   - str_to_epoch(row['OwnerCreationDate']))
    body_len = len(row['BodyMarkdown'])
    title_len = len(row['Title'])
    return [account_age, body_len, title_len]

if __name__ == '__main__':
    # NOTE(review): the triple-quoted string below is dead code -- an
    # earlier single-file SVM experiment kept around as a bare string
    # expression.  It is evaluated and discarded at runtime; never executed.
    """
    data = get_data(sys.argv[1])
    tokenize(data)
    tokens,token_to_pmi = get_tokens(TOKEN_FILE)
    all_features = []
    all_classes = {l:[] for l in LABELS}
    for i,row in enumerate(data):
        if DEBUG:
             print 'Getting features for %d of %d documents'%(i,len(data))
        all_features.append(get_features(row,tokens,token_to_pmi))
        for key,val in all_classes.items():
             all_classes[key].append(1 if row['OpenStatus'] == key else 0)
    train_features = all_features[:len(data)/2]
    train_classes = {key:val[:len(data)/2] for key,val in all_classes.items()}
    test_features = all_features[len(data)/2:]
    test_classes = {key:val[len(data)/2:] for key,val in all_classes.items()}

    models = {key:svm_train(val, train_features, '-b 1') for key,val in train_classes.items()}
    predictions = {c:[p_1 for p_1,p_0 in svm_predict(test_classes[c],test_features,m, '-b 1')[2]] for c,m in models.items()}
    totals = [sum(p[i] for c,p in predictions.items()) for i,features in enumerate(test_features)]
    probabilities = {}
    for c,pred in predictions.items():
        probabilities[c] = [p/totals[i] for i,p in enumerate(pred)]
    
    log_loss = -sum(log(probabilities[c][i]) for i,c in enumerate(test_classes))/len(test_classes)
    print log_loss
    """

    # Usage: script.py <train_csv> <test_csv>
    train_data = get_data(sys.argv[1])
    test_data = get_data(sys.argv[2])

    #tokenize(train_data)
    #tokenize(test_data)
    tokens,token_to_pmi = get_tokens(TOKEN_FILE)

    # Build training feature vectors plus one-vs-rest binary targets:
    # for each label, 1 if the row's OpenStatus equals that label, else 0.
    train_features = []
    train_classes = {l:[] for l in LABELS.keys()}
    for i,row in enumerate(train_data):
        if DEBUG:
            print 'Getting features for %d of %d documents'%(i,len(train_data))
        train_features.append(get_features(row,tokens,token_to_pmi))
        for key,val in train_classes.items():
            train_classes[key].append(1 if row['OpenStatus'] == key else 0)


    # Test features, with all-zero dummy targets (liblinear's predict()
    # requires a label vector even when true labels are unknown).
    test_features = []
    test_classes = {l:[] for l in LABELS.keys()}
    for i,row in enumerate(test_data):
        if DEBUG:
            print 'Getting features for %d of %d documents'%(i,len(test_data))
        test_features.append(get_features(row,tokens,token_to_pmi))
        for key,val in test_classes.items():
            # Dummy classes for prediction
            test_classes[key].append(0)
    # One binary liblinear model per label.  NOTE(review): '-s 7' selects
    # L2-regularized logistic regression (dual) in LIBLINEAR -- confirm
    # against the installed liblinear version's solver table.
    models = {key:train(val, train_features, '-s 7') for key,val in train_classes.items()}
    # predict(...)[2] is the per-instance decision-value list; p[0] is the
    # value for the first (positive) class.  Each raw score is clamped at 0
    # and scaled by the label's weight from LABELS before normalization.
    predictions = {c:[max(0,p[0]*LABELS[c]) for p in predict(test_classes[c],test_features,m)[2]] for c,m in models.items()}
    # Normalize across the five labels so each row's values sum to 1.
    # NOTE(review): if every clamped score for a row is 0, totals[i] is 0
    # and the division below raises ZeroDivisionError -- verify this cannot
    # happen with real data.
    totals = [sum(p[i] for c,p in predictions.items()) for i,features in enumerate(test_features)]
    probabilities = {}
    for c,pred in predictions.items():
        probabilities[c] = [p/totals[i] for i,p in enumerate(pred)]
    # Emit one CSV line per test post: PostId followed by the normalized
    # probability for each label, in LABELS' iteration order.
    with open(DATA_FILE, 'w') as data_file:
        for i,row in enumerate(test_data):
            data_file.write(','.join([str(s) for s in [row['PostId']] + [probabilities[c][i] for c,prior in LABELS.items()]]) + '\n')
