import nltk
import sqlite3
import numpy
from scipy import sparse
from collections import defaultdict
from sklearn.cross_validation import train_test_split
from sklearn.utils import shuffle
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.grid_search import GridSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB, GaussianNB
from sklearn.svm import SVC, LinearSVC
from sklearn.feature_selection import SelectKBest, chi2
from datetime import datetime
__author__ = 'panagiotis'
from models import ColumnSelector
from models import DocVectorizer, SentimentTransformer
from models import SpellingTransformer, RemoveContractions, NegationTransformer


from sklearn.metrics import classification_report
from sklearn.metrics import precision_score, recall_score, f1_score
from sklearn.metrics import mean_absolute_error as mae


# load review data
database_file = "Hotels_g189413_Crete.db"
database_path = "/home/panagiotis/Projects/Thesis/datasets/"
# database_path = "/home/pstalidis/Projects/Thesis/datasets/"
conn = sqlite3.connect(database_path + database_file)
source = conn.cursor()

# FIX: the original used "SELECT * FROM reviews" and a fragile 19-way
# positional unpack just to keep 3 columns — that silently breaks (or
# mis-assigns fields) if the table's column order ever changes.  Ask the
# database for exactly the columns we need instead.
# Each row: (review_title, review_text, review_rating).
data = source.execute(
    "SELECT review_title, review_text, review_rating FROM reviews").fetchall()
# All rows are materialized in `data`; release the connection now rather
# than leaking it for the rest of the script.
conn.close()

# Binarize the 1-5 star rating: > 3 stars is a positive review (label 1),
# < 4 stars is a negative one (label 0); titles are carried along unused.
positives = [(title, text, 1) for title, text, rating in data if rating > 3]
negatives = [(title, text, 0) for title, text, rating in data if rating < 4]

# 5000 train / 500 test examples per class, with a fixed seed so the
# experiment is repeatable.
pos_train, pos_test = train_test_split(positives, train_size=5000, test_size=500, random_state=0)
neg_train, neg_test = train_test_split(negatives, train_size=5000, test_size=500, random_state=0)

# Interleave the two classes, then unzip into parallel column tuples.
train_titles, train_review, train_target = zip(*shuffle(pos_train + neg_train, random_state=0))
test_titles, test_review, test_target = zip(*shuffle(pos_test + neg_test, random_state=0))

# pre processing of all data
t0 = datetime.now()
preprocess = Pipeline([('contract', RemoveContractions(spell_check=False, output="tokens")),
                       ('spelling', SpellingTransformer(tokenize=False, output="tokens")),
                       ('negations', NegationTransformer(tokenize=False, output="tokens"))])
# train_titles = preprocess.fit_transform(train_titles)
train_review = preprocess.fit_transform(train_review)
# test_titles = preprocess.fit_transform(test_titles)
test_review = preprocess.fit_transform(test_review)
train_target = [int(r) for r in train_target]
test_target = [int(r) for r in test_target]
print "preprocessing took", datetime.now() - t0


features = [i for i in xrange(50, 300, 5)]  # 5 min per iteration
results = defaultdict(list)

for number in features:
    print 50*"-"
    print "Number of features:", number,
    t0 = datetime.now()
    vectorizer = DocVectorizer(n_features=number, tokenize=False)
    X_train = vectorizer.fit_transform(train_review, train_target)  # should I keep a copy?
    X_test = vectorizer.transform(test_review)
    print "vectorizing time", datetime.now() - t0
    for name, clf in [("review_RBF_SVC", SVC(C=1000, gamma=0.01)),
                      ("review_LinearSVC", LinearSVC(dual=False, tol=1e-3)),
                      ("review_Naive Bayes", MultinomialNB()),
                      ("review_Max Entropy", LogisticRegression()),
                      ]:
        t0 = datetime.now()
        clf.fit(X_train, train_target)
        z_test = clf.predict(X_test)

        results[name].append((number,
                              precision_score(test_target, z_test),
                              recall_score(test_target, z_test),
                              f1_score(test_target, z_test),))
        print "classifier", name,
        print "classification time", datetime.now() - t0,
        print precision_score(test_target, z_test), recall_score(test_target, z_test), f1_score(test_target, z_test)

from matplotlib import pyplot

# One figure per metric.  Rows in `results` are (n_features, precision,
# recall, f1), so metric column 1/2/3 goes to figure 1/2/3 respectively.
# FIX: the original repeated the same plotting block three times and
# rebound the outer `features` list inside each loop; a single loop over
# (figure number, metric column) removes the duplication and the shadowing.
for fig_no, metric_idx in [(1, 1), (2, 2), (3, 3)]:
    pyplot.figure(fig_no)
    for name in results:
        curve = results[name]
        xs = [row[0] for row in curve]
        ys = [row[metric_idx] for row in curve]
        pyplot.plot(xs, ys, label=name)
    # pyplot.ylim(0, 1)
    pyplot.legend(loc=4)  # loc=4 == lower right

pyplot.show()
