from sklearn.neighbors import NearestCentroid
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cross_validation import train_test_split
from sklearn import metrics
import numpy as np
import matplotlib.pyplot as plt
import nltk
import enchant
from time import time
import sqlite3


def tokenize(txt):
    """Tokenize *txt* into stemmed, spell-checked alphabetic tokens.

    Splits the text with NLTK's word tokenizer, keeps only purely
    alphabetic words that pass an en_US spell check, and returns their
    Porter stems.
    """
    # Build the stemmer and dictionary once and cache them on the
    # function object: the TfidfVectorizer calls tokenize() once per
    # document, so re-creating them on every call is pure overhead.
    if not hasattr(tokenize, "_stemmer"):
        tokenize._stemmer = nltk.stem.PorterStemmer()
        tokenize._spellcheck = enchant.Dict("en_US")
    stem = tokenize._stemmer.stem
    check = tokenize._spellcheck.check
    # Test the cheap isalpha() first so punctuation/number tokens never
    # reach the (more expensive) spell checker.
    return [stem(word) for word in nltk.word_tokenize(txt)
            if word.isalpha() and check(word)]

t0 = time()
# Read the review data from the SQLite database into memory.
database_file = "Reviews_from_Crete.db"
database_path = "/home/panagiotis/Projects/Thesis/datasets/"
conn = sqlite3.connect(database_path + database_file)
try:
    source = conn.cursor()
    # Each row has 12 columns; only the review text (4th column) and the
    # score (5th column) are used.  LIMIT 0,100 keeps a small sample.
    # noinspection PyRedeclaration
    ordata = [(rev, sc) for (_, _, _, rev, sc, _, _, _, _, _, _, _)
              in source.execute("SELECT * FROM reviews LIMIT 0,100")]
finally:
    conn.close()  # release the DB handle; nothing below reads from it

reviews, s = zip(*[(q, str(int(w))) for q, w in ordata])

# Binary target: True for positive reviews (score 4 or 5).
y = [w in ("4", "5") for w in s]
print(y)  # print() call — the bare `print y` statement is a Python 3 syntax error
categories = list(set(y))

# Split data to train and test sets.
# NOTE(review): train_test_split is imported from sklearn.cross_validation,
# which was removed in scikit-learn 0.20 — migrate the import at the top of
# the file to sklearn.model_selection when upgrading.
X_train, X_test, y_train, y_test = train_test_split(reviews, y, test_size=0.4, random_state=0)

print("loading time: %0.3fs" % (time() - t0))

# Turn the raw review strings into tf-idf feature matrices.
t0 = time()
english_stopwords = nltk.corpus.stopwords.words('english')
vectorizer = TfidfVectorizer(
    tokenizer=tokenize,
    stop_words=english_stopwords,
    max_df=0.7,
    min_df=0.1,
    sublinear_tf=True,
    use_idf=True,
    smooth_idf=True,
)

# Fit the vocabulary/idf weights on the training split only, then apply
# the same transform to the held-out test split.
X_train = vectorizer.fit_transform(X_train)
X_test = vectorizer.transform(X_test)
print("vectorizing time: %0.3fs" % (time() - t0))

results = []


def benchmark(clf):
    """Train *clf* on the global train split, evaluate on the test split,
    display its confusion matrix, and return a summary tuple.

    Parameters
    ----------
    clf : estimator with fit/predict/score (scikit-learn API).

    Returns
    -------
    (name, score, train_time, test_time) — classifier class name,
    mean accuracy on the test set, and wall-clock seconds for
    fitting and predicting respectively.
    """
    print('_' * 80)
    print("Training: ")
    print(clf)
    t0 = time()
    clf.fit(X_train, y_train)
    train_time = time() - t0
    print("train time: %0.3fs" % train_time)

    # Time only the prediction itself.  The original stopped the clock
    # after the blocking plt.show() window was closed, so the reported
    # "test time" mostly measured how long the user stared at the plot.
    t0 = time()
    pred = clf.predict(X_test)
    test_time = time() - t0
    print("test time:  %0.3fs" % test_time)

    cm = metrics.confusion_matrix(y_test, pred)
    plt.matshow(cm)
    plt.title('Confusion matrix')
    plt.colorbar()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show()

    # score = metrics.f1_score(y_test, pred)  # , labels=None, pos_label="5"
    # clf.score() is mean accuracy, not F1 — label the output accordingly.
    score = clf.score(X_test, y_test)
    print("accuracy:   %0.3f" % score)

    clf_descr = str(clf).split('(')[0]
    return clf_descr, score, train_time, test_time


# Benchmark NearestCentroid once per distance metric.
for metric_name in ('cosine', 'euclidean'):
    print('=' * 80)
    print("NearestCentroid, " + metric_name + " as metric")
    clf = NearestCentroid(metric=metric_name, shrink_threshold=None)
    results.append(benchmark(clf))

# Plot the benchmark results as horizontal bars.

indices = np.arange(len(results))

# Transpose the per-classifier (name, score, train_time, test_time)
# tuples into four parallel columns.
results = [list(column) for column in zip(*results)]
clf_names, score, training_time, test_time = results

# Normalise both timing series to [0, 1] so they share the score axis.
training_time = np.array(training_time) / np.max(training_time)
test_time = np.array(test_time) / np.max(test_time)

plt.figure(figsize=(12, 8))
plt.title("Score")
plt.barh(indices, score, .2, label="score", color='r')
plt.barh(indices + .3, training_time, .2, label="training time", color='g')
plt.barh(indices + .6, test_time, .2, label="test time", color='b')
plt.yticks(())
plt.legend(loc='best')
plt.subplots_adjust(left=.25, top=.95, bottom=.05)

# Label each bar group with its classifier name.
for bar_pos, clf_name in zip(indices, clf_names):
    plt.text(-.3, bar_pos, clf_name)

plt.show()

