import sys
from time import time
from optparse import OptionParser
import logging
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer, HashingVectorizer
from sklearn.linear_model import RidgeClassifier, SGDClassifier, Perceptron, PassiveAggressiveClassifier
from sklearn.cross_validation import train_test_split
from sklearn.svm import SVC, LinearSVC
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.utils.extmath import density
from sklearn import metrics
import numpy as np
import sqlite3
import enchant, nltk
import matplotlib.pyplot as plt

__author__ = 'panagiotis'


def tokenize(txt):
    """Tokenize *txt* into Porter-stemmed, spell-checked word tokens.

    Keeps only purely alphabetic tokens that pass an en_US spell check,
    then stems each surviving token with the Porter stemmer.
    """
    stemmer = nltk.stem.PorterStemmer()
    spellcheck = enchant.Dict("en_US")
    # Test isalpha() first: it is cheap and guards the more expensive
    # dictionary lookup. Because the conditions are AND-ed, reordering
    # leaves the resulting token set unchanged.
    return [stemmer.stem(word)
            for word in nltk.word_tokenize(txt)
            if word.isalpha() and spellcheck.check(word)]

t0 = time()
# --- Load review data from the SQLite database into memory ---
database_file = "Reviews_from_Crete.db"
database_path = "/home/panagiotis/Projects/Thesis/datasets/"
conn = sqlite3.connect(database_path + database_file)
try:
    source = conn.cursor()
    # Row layout (12 columns): the 4th column is the review text, the
    # 5th is the numeric score; all other columns are ignored here.
    ordata = [(rev, sc)
              for (_, _, _, rev, sc, _, _, _, _, _, _, _)
              in source.execute("SELECT * FROM reviews")]
finally:
    # All rows are materialized above, so the connection can be closed
    # immediately instead of leaking for the lifetime of the script.
    conn.close()

reviews, y = zip(*[(q, str(int(w))) for q, w in ordata])

# Binarize the labels: 1 for a score strictly greater than 4 (i.e. only
# top-rated reviews -- scores presumably range 1-5, TODO confirm), else 0.
y = [1 if int(a) > 4 else 0 for a in y]

categories = list(set(y))

# Split data to train and test sets (60% train / 40% test, fixed seed
# for reproducibility).
X_train, X_test, y_train, y_test = train_test_split(reviews, y, test_size=0.4, random_state=0)

print("loading time: %0.3fs" % (time() - t0))

"""
# Train the model
clf1 = Pipeline([("tfidf", CountVectorizer(tokenizer=tokenize, stop_words=nltk.corpus.stopwords.words('english'))),
                ("svc", MultinomialNB())])

clf1.fit(X_train, y_train)

print [w for w in clf1.predict(X_test)][:50]
print y_test[:50]
print clf1.score(X_test, y_test)


clf2 = Pipeline([("tfidf", TfidfVectorizer(tokenizer=tokenize, stop_words=nltk.corpus.stopwords.words('english'), sublinear_tf=True)),
                ("svc", LinearSVC())])


params = {"tfidf__ngram_range": [(1, 1), (1, 2)],
          "svc__C": [.01, .1, 1, 10, 100]}

gs = GridSearchCV(clf2, params, n_jobs=-1)
gs.fit(X_train, y_train)


print(gs.best_estimator_)
print(gs.best_score_)


# -------------------------------------

clf2.fit(X_train, y_train)

print [w for w in clf2.predict(X_test)][:50]
print y_test[:50]
print clf2.score(X_test, y_test)

"""
t0 = time()
# Vectorize the text once, up front, so every benchmarked classifier
# reuses the same feature matrices.
# vectorizer = CountVectorizer(tokenizer=tokenize, stop_words=nltk.corpus.stopwords.words('english'), max_df=0.5)
vectorizer = TfidfVectorizer(tokenizer=tokenize, stop_words=nltk.corpus.stopwords.words('english'), sublinear_tf=True)

# Fit the vocabulary and IDF weights on the training split only, then
# reuse them to transform the test split (avoids test-set leakage).
X_train = vectorizer.fit_transform(X_train)
X_test = vectorizer.transform(X_test)
print("vectorizing time: %0.3fs" % (time() - t0))


def benchmark(clf):
    """Fit *clf* on the module-level train split and score it on the test split.

    Prints fit/predict timings, the F1 score, and -- for linear models
    exposing ``coef_`` -- dimensionality and density statistics.

    Returns a tuple ``(classifier_name, f1_score, train_time, test_time)``.
    """
    print('_' * 80)
    print("Training: ")
    print(clf)
    t0 = time()
    try:
        clf.fit(X_train, y_train)
    except ValueError as err:
        # The previous version silently swallowed this error, leaving the
        # classifier unfitted so predict() failed with a confusing message.
        # Surface the real cause and re-raise instead.
        print("fit failed: %s" % err)
        raise
    train_time = time() - t0
    print("train time: %0.3fs" % train_time)

    t0 = time()
    pred = clf.predict(X_test)
    test_time = time() - t0
    print("test time:  %0.3fs" % test_time)

    score = metrics.f1_score(y_test, pred, labels=None)
    print("f1-score:   %0.3f" % score)

    # Linear models expose their learned weights; report their size and
    # sparsity so the classifiers can be compared on model compactness.
    if hasattr(clf, 'coef_'):
        print("dimensionality: %d" % clf.coef_.shape[1])
        print("density: %f" % density(clf.coef_))
        print()

    print()
    # First token of repr(clf), e.g. "LinearSVC" from "LinearSVC(C=1.0, ...)".
    clf_descr = str(clf).split('(')[0]
    return clf_descr, score, train_time, test_time


# Benchmark a first batch of linear and instance-based classifiers.
results = []
candidates = [
    (RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"),
    (Perceptron(n_iter=50), "Perceptron"),
    (PassiveAggressiveClassifier(n_iter=50), "Passive-Aggressive"),
    (KNeighborsClassifier(n_neighbors=10), "kNN"),
]
for clf, name in candidates:
    print('=' * 80)
    print(name)
    results.append(benchmark(clf))

# L1- and L2-penalized linear models, trained with Liblinear and SGD.
for penalty in ("l2", "l1"):
    print('=' * 80)
    print("%s penalty" % penalty.upper())
    # Liblinear-based linear SVM.
    liblinear_clf = LinearSVC(loss='l2', penalty=penalty, dual=False, tol=1e-3)
    results.append(benchmark(liblinear_clf))

    # Stochastic-gradient-descent linear model with the same penalty.
    sgd_clf = SGDClassifier(alpha=.0001, n_iter=50, penalty=penalty)
    results.append(benchmark(sgd_clf))

# SGD with the Elastic-Net penalty (a mix of L1 and L2 regularization).
print('=' * 80)
print("Elastic-Net penalty")
results.append(benchmark(
    SGDClassifier(alpha=.0001, n_iter=50, penalty="elasticnet")))

# Rocchio-style nearest-centroid classifier, no shrink threshold.
print('=' * 80)
print("NearestCentroid (aka Rocchio classifier)")
results.append(benchmark(NearestCentroid()))

# Sparse naive Bayes variants.
print('=' * 80)
print("Naive Bayes")
results.append(benchmark(MultinomialNB(alpha=.01)))
results.append(benchmark(BernoulliNB(alpha=.01)))


class L1LinearSVC(LinearSVC):
    """LinearSVC preceded by L1-based feature selection.

    An L1-penalized LinearSVC is fitted first and used, through its
    ``transform`` method, to keep only features with non-zero
    coefficients; the final classifier is then trained on that reduced
    feature space.
    """

    def fit(self, X, y):
        # The smaller C, the stronger the regularization.
        # The more regularization, the more sparsity.
        self.transformer_ = LinearSVC(penalty="l1",
                                      dual=False, tol=1e-3)
        # NOTE(review): using LinearSVC.fit_transform/transform as a
        # feature selector was removed in later sklearn releases --
        # confirm the pinned version still supports it.
        X = self.transformer_.fit_transform(X, y)
        return LinearSVC.fit(self, X, y)

    def predict(self, X):
        # Project the input into the reduced feature space selected
        # during fit() before predicting.
        X = self.transformer_.transform(X)
        return LinearSVC.predict(self, X)

print('=' * 80)
print("LinearSVC with L1-based feature selection")
# Benchmark the custom two-stage (select-then-classify) estimator.
results.append(benchmark(L1LinearSVC()))


# --- Plot the benchmark results: F1 score plus normalized timings ---

indices = np.arange(len(results))

# Transpose the list of per-classifier 4-tuples into four parallel
# sequences: names, scores, training times, test times.
results = [[entry[col] for entry in results] for col in range(4)]

clf_names, score, training_time, test_time = results
# Scale both timing series to [0, 1] so all three bar groups share an axis.
training_time = np.array(training_time) / np.max(training_time)
test_time = np.array(test_time) / np.max(test_time)

plt.figure(figsize=(12, 8))
plt.title("Score")
plt.barh(indices, score, .2, label="score", color='r')
plt.barh(indices + .3, training_time, .2, label="training time", color='g')
plt.barh(indices + .6, test_time, .2, label="test time", color='b')
plt.yticks(())
plt.legend(loc='best')
plt.subplots_adjust(left=.25)
plt.subplots_adjust(top=.95)
plt.subplots_adjust(bottom=.05)

# Annotate each bar group with its classifier name, left of the axis.
for row, label in zip(indices, clf_names):
    plt.text(-.3, row, label)

plt.show()
